From decaca50f468882b854d32951f221bff3d7bb958 Mon Sep 17 00:00:00 2001
From: Netdata bot <43409846+netdatabot@users.noreply.github.com>
Date: Mon, 21 Oct 2024 06:41:34 -0400
Subject: [PATCH] Regenerate integrations.js (#18834)
Co-authored-by: ilyam8 <22274335+ilyam8@users.noreply.github.com>
---
integrations/integrations.js | 23 +++++++-
integrations/integrations.json | 23 +++++++-
.../logs/integrations/windows_event_logs.md | 53 +++++++++++++++++++
.../nvidia_smi/integrations/nvidia_gpu.md | 5 +-
4 files changed, 100 insertions(+), 4 deletions(-)
create mode 100644 integrations/logs/integrations/windows_event_logs.md
diff --git a/integrations/integrations.js b/integrations/integrations.js
index 3c00a001e6ecc0..e42185952ce7f3 100644
--- a/integrations/integrations.js
+++ b/integrations/integrations.js
@@ -5696,7 +5696,7 @@ export const integrations = [
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | The maximum duration, in seconds, to wait for an `nvidia-smi` command to complete. This setting applies differently based on the collector's mode. **Loop Mode:** In loop mode, the timeout primarily determines how long to wait for the initial `nvidia-smi` execution. If the initial query takes longer than the timeout, the collector may report an error. For systems with multiple GPUs, the initial load time can sometimes be significant (e.g., 5-10 seconds). **Regular Mode:** If the collector is in regular mode, the timeout specifies how long to wait for each individual `nvidia-smi` execution. | 10 | no |\n| loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n{% /details %}\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nvidia_smi` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nvidia_smi\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nvidia_smi /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nvidia_smi\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
- "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n",
+ "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| index | GPU index (nvidia_smi typically orders GPUs by PCI bus ID) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n",
"integration_type": "collector",
"id": "go.d.plugin-nvidia_smi-Nvidia_GPU",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml",
@@ -22351,6 +22351,27 @@ export const integrations = [
"integration_type": "logs",
"edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml"
},
+ {
+ "id": "windows-event-logs",
+ "meta": {
+ "name": "Windows Event Logs",
+ "link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows-events.plugin/README.md",
+ "categories": [
+ "logs",
+ "data-collection.windows-systems"
+ ],
+ "icon_filename": "windows.svg"
+ },
+ "keywords": [
+ "windows",
+ "windows events",
+ "logs"
+ ],
+ "overview": "# Windows Event Logs\n\nThe Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and\nefficient.\n\nThe plugin automatically detects all the available channels and offers a list of \"Event Channels\".\n\nBy default, it aggregates events from all event channels, providing a unified view of all events.\n\n\n## Visualization\n\nYou can start exploring Windows event logs on the \"Logs\" tab of the Netdata UI.\n\n\n## Key features\n\n- Supports **Windows Event Logs (WEL)**.\n- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log.\n- Allows filtering on all System Events fields.\n- Allows **full text search** (`grep`) on all System and User fields.\n- Provides a **histogram** for log entries over time, with a break down per field-value, for any System Event field and any\n time-frame.\n- Supports coloring log entries based on severity.\n- In PLAY mode it \"tails\" all the Events, showing new log entries immediately after they are received.\n",
+ "setup": "## Setup\n\n## Prerequisites\n\n- Netdata Cloud paid subscription\n\n\n## Configuration\n\nThere is no configuration needed for this integration.\n",
+ "integration_type": "logs",
+ "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml"
+ },
{
"id": "oidc-authentication",
"meta": {
diff --git a/integrations/integrations.json b/integrations/integrations.json
index d50d2fd53019b6..3f690574adc0c2 100644
--- a/integrations/integrations.json
+++ b/integrations/integrations.json
@@ -5694,7 +5694,7 @@
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | The maximum duration, in seconds, to wait for an `nvidia-smi` command to complete. This setting applies differently based on the collector's mode. **Loop Mode:** In loop mode, the timeout primarily determines how long to wait for the initial `nvidia-smi` execution. If the initial query takes longer than the timeout, the collector may report an error. For systems with multiple GPUs, the initial load time can sometimes be significant (e.g., 5-10 seconds). **Regular Mode:** If the collector is in regular mode, the timeout specifies how long to wait for each individual `nvidia-smi` execution. | 10 | no |\n| loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. | yes | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n",
"troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `nvidia_smi` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep nvidia_smi\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep nvidia_smi /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep nvidia_smi\n```\n\n",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
- "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n",
+ "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| index | GPU index (nvidia_smi typically orders GPUs by PCI bus ID) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |\n| nvidia_smi.gpu_utilization | gpu | % |\n| nvidia_smi.gpu_memory_utilization | memory | % |\n| nvidia_smi.gpu_decoder_utilization | decoder | % |\n| nvidia_smi.gpu_encoder_utilization | encoder | % |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B |\n| nvidia_smi.gpu_temperature | temperature | Celsius |\n| nvidia_smi.gpu_voltage | voltage | V |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |\n| nvidia_smi.gpu_power_draw | power_draw | Watts |\n| nvidia_smi.gpu_performance_state | P0-P15 | state |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |\n| nvidia_smi.gpu_mig_devices_count | mig | devices |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |\n\n",
"integration_type": "collector",
"id": "go.d.plugin-nvidia_smi-Nvidia_GPU",
"edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml",
@@ -22349,6 +22349,27 @@
"integration_type": "logs",
"edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml"
},
+ {
+ "id": "windows-event-logs",
+ "meta": {
+ "name": "Windows Event Logs",
+ "link": "https://github.com/netdata/netdata/blob/master/src/collectors/windows-events.plugin/README.md",
+ "categories": [
+ "logs",
+ "data-collection.windows-systems"
+ ],
+ "icon_filename": "windows.svg"
+ },
+ "keywords": [
+ "windows",
+ "windows events",
+ "logs"
+ ],
+ "overview": "# Windows Event Logs\n\nThe Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and\nefficient.\n\nThe plugin automatically detects all the available channels and offers a list of \"Event Channels\".\n\nBy default, it aggregates events from all event channels, providing a unified view of all events.\n\n\n## Visualization\n\nYou can start exploring Windows event logs on the \"Logs\" tab of the Netdata UI.\n\n\n## Key features\n\n- Supports **Windows Event Logs (WEL)**.\n- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log.\n- Allows filtering on all System Events fields.\n- Allows **full text search** (`grep`) on all System and User fields.\n- Provides a **histogram** for log entries over time, with a break down per field-value, for any System Event field and any\n time-frame.\n- Supports coloring log entries based on severity.\n- In PLAY mode it \"tails\" all the Events, showing new log entries immediately after they are received.\n",
+ "setup": "## Setup\n\n## Prerequisites\n\n- Netdata Cloud paid subscription\n\n\n## Configuration\n\nThere is no configuration needed for this integration.\n",
+ "integration_type": "logs",
+ "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml"
+ },
{
"id": "oidc-authentication",
"meta": {
diff --git a/integrations/logs/integrations/windows_event_logs.md b/integrations/logs/integrations/windows_event_logs.md
new file mode 100644
index 00000000000000..486f5d7fec00e8
--- /dev/null
+++ b/integrations/logs/integrations/windows_event_logs.md
@@ -0,0 +1,53 @@
+
+
+# Windows Event Logs
+
+
+
+
+
+The Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and
+efficient.
+
+The plugin automatically detects all the available channels and offers a list of "Event Channels".
+
+By default, it aggregates events from all event channels, providing a unified view of all events.
+
+
+
+
+## Visualization
+
+You can start exploring Windows event logs on the "Logs" tab of the Netdata UI.
+
+
+## Key features
+
+- Supports **Windows Event Logs (WEL)**.
+- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to the Event Log.
+- Allows filtering on all System Event fields.
+- Allows **full-text search** (`grep`) on all System and User fields.
+- Provides a **histogram** for log entries over time, with a breakdown per field value, for any System Event field and any
+ time-frame.
+- Supports coloring log entries based on severity.
+- In PLAY mode it "tails" all the Events, showing new log entries immediately after they are received.
+
+
+## Setup
+
+### Prerequisites
+
+- Netdata Cloud paid subscription
+
+
+### Configuration
+
+There is no configuration needed for this integration.
+
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md b/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md
index b4c79c388e6dc8..c5ce98ba29f51d 100644
--- a/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md
+++ b/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md
@@ -63,7 +63,8 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| uuid | GPU id (e.g. 00000000:00:04.0) |
+| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |
+| index | GPU index (nvidia_smi typically orders GPUs by PCI bus ID) |
| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |
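+
+The values for `uuid` and `index` come from `nvidia-smi` itself. As a quick cross-check (a minimal example, assuming `nvidia-smi` is on the `PATH`), you can print the index, UUID, and product name it reports for each GPU:
+
+```bash
+# List every detected GPU with its index, UUID, and product name in CSV form.
+nvidia-smi --query-gpu=index,uuid,name --format=csv
+```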
Metrics:
@@ -95,7 +96,7 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| uuid | GPU id (e.g. 00000000:00:04.0) |
+| uuid | GPU uuid (e.g. GPU-27b94a00-ed54-5c24-b1fd-1054085de32a) |
| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |
| gpu_instance_id | GPU instance id (e.g. 1) |
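+
+For MIG, the `uuid` label still refers to the parent GPU, while `gpu_instance_id` identifies the instance within it. A minimal way to inspect this on the host (assuming MIG mode is enabled and you have permission to query it):
+
+```bash
+# List GPUs and, when MIG mode is enabled, their MIG devices and UUIDs.
+nvidia-smi -L
+
+# List the GPU instances together with their instance IDs.
+nvidia-smi mig -lgi
+```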