From 5c8ebf9b5be35284e7ff7cc419552e7b27f9b44e Mon Sep 17 00:00:00 2001 From: Caio Ramos Casimiro Date: Thu, 9 May 2024 11:15:31 +0100 Subject: [PATCH] feat(proxy-wasm) metrics This commit adds support for metrics in the form of counters, gauges and histograms. This commit adds support for storing metrics in WasmX shared-memory key-value store facility. The workflow users are expected to perform follows from Proxy-Wasm metrics ABI itself: users define metrics before using them; when a metric is defined a numeric ID is returned which can be used later for reading or updating a metric. If a metric is defined and the system is out of metrics memory, then the metric definition fails as eviction support hasn't been implemented. The implemented design, described at [1], allows users to perform most metric updates without synchronizing Nginx workers, i.e. without the aid of locks. Users can refer to [2] for a description of how metrics are represented in memory and how to estimate the size of the shared-memory used for metrics storage. Two configuration directives, `slab_size` and `max_metric_name_length`, are added to configure the size of the shared-memory zone dedicated to metrics and the maximum length of a metric name, respectively. [1] docs/adr/005-metrics.md [2] docs/METRICS.md --- config | 9 +- docs/DIRECTIVES.md | 53 +- docs/METRICS.md | 97 ++++ docs/PROXY_WASM.md | 8 +- docs/adr/005-metrics.md | 219 ++++++++ src/common/debug/ngx_wasm_debug_module.c | 26 + src/common/metrics/ngx_wa_histogram.c | 234 +++++++++ src/common/metrics/ngx_wa_histogram.h | 16 + src/common/metrics/ngx_wa_metrics.c | 490 ++++++++++++++++++ src/common/metrics/ngx_wa_metrics.h | 79 +++ src/common/proxy_wasm/ngx_proxy_wasm.h | 2 - src/common/proxy_wasm/ngx_proxy_wasm_host.c | 146 +++++- src/common/shm/ngx_wasm_shm.h | 1 + src/common/shm/ngx_wasm_shm_kv.c | 26 +- src/common/shm/ngx_wasm_shm_kv.h | 20 + src/ngx_wasmx.c | 32 ++ src/ngx_wasmx.h | 8 +- src/wasm/ngx_wasm.h | 11 +- src/wasm/ngx_wasm_core_module.c | 22 + src/wasm/ngx_wasm_directives.c | 120 ++++- t/01-wasm/directives/011-metrics_directives.t | 120 +++++ .../hfuncs/contexts/150-proxy_define_metric.t | 170 ++++++ .../contexts/151-proxy_increment_metric.t | 172 ++++++ .../hfuncs/contexts/152-proxy_record_metric.t | 203 ++++++++ .../153-proxy_record_metric_histogram.t | 188 +++++++ .../metrics/001-define_metric_edge_cases.t | 123 +++++ .../hfuncs/metrics/002-get_metric_misuse.t | 32 ++ .../metrics/003-increment_metric_misuse.t | 60 +++ .../hfuncs/metrics/004-record_metric_misuse.t | 58 +++ .../metrics/005-record_metric_edge_cases.t | 74 +++ t/07-metrics/001-metrics_sighup.t | 199 +++++++ t/07-metrics/002-histograms_sighup.t | 186 +++++++ t/TestWasmX.pm | 18 + .../proxy-wasm-tests/hostcalls/src/filter.rs | 6 +- t/lib/proxy-wasm-tests/hostcalls/src/lib.rs | 82 ++- .../hostcalls/src/tests/mod.rs | 136 ++++- .../hostcalls/src/types/mod.rs | 21 + .../hostcalls/src/types/test_http.rs | 25 +- .../hostcalls/src/types/test_root.rs | 1 + util/setup_dev.sh | 24 + 40 files changed, 3439 insertions(+), 78 deletions(-) create mode 100644 docs/METRICS.md create mode 100644 docs/adr/005-metrics.md create mode 100644 src/common/metrics/ngx_wa_histogram.c create mode 100644 src/common/metrics/ngx_wa_histogram.h create mode 100644 src/common/metrics/ngx_wa_metrics.c create mode 100644 src/common/metrics/ngx_wa_metrics.h create mode 100644 t/01-wasm/directives/011-metrics_directives.t create mode 100644 t/03-proxy_wasm/hfuncs/contexts/150-proxy_define_metric.t create mode 100644 
t/03-proxy_wasm/hfuncs/contexts/151-proxy_increment_metric.t create mode 100644 t/03-proxy_wasm/hfuncs/contexts/152-proxy_record_metric.t create mode 100644 t/03-proxy_wasm/hfuncs/contexts/153-proxy_record_metric_histogram.t create mode 100644 t/03-proxy_wasm/hfuncs/metrics/001-define_metric_edge_cases.t create mode 100644 t/03-proxy_wasm/hfuncs/metrics/002-get_metric_misuse.t create mode 100644 t/03-proxy_wasm/hfuncs/metrics/003-increment_metric_misuse.t create mode 100644 t/03-proxy_wasm/hfuncs/metrics/004-record_metric_misuse.t create mode 100644 t/03-proxy_wasm/hfuncs/metrics/005-record_metric_edge_cases.t create mode 100644 t/07-metrics/001-metrics_sighup.t create mode 100644 t/07-metrics/002-histograms_sighup.t diff --git a/config b/config index dd1c748ee..78e62d9e2 100644 --- a/config +++ b/config @@ -129,6 +129,7 @@ NGX_WASMX_INCS="\ $ngx_addon_dir/src/common \ $ngx_addon_dir/src/common/proxy_wasm \ $ngx_addon_dir/src/common/shm \ + $ngx_addon_dir/src/common/metrics \ $ngx_addon_dir/src/common/lua" NGX_WASMX_DEPS="\ @@ -141,7 +142,9 @@ NGX_WASMX_DEPS="\ $ngx_addon_dir/src/common/proxy_wasm/ngx_proxy_wasm_properties.h \ $ngx_addon_dir/src/common/shm/ngx_wasm_shm.h \ $ngx_addon_dir/src/common/shm/ngx_wasm_shm_kv.h \ - $ngx_addon_dir/src/common/shm/ngx_wasm_shm_queue.h" + $ngx_addon_dir/src/common/shm/ngx_wasm_shm_queue.h \ + $ngx_addon_dir/src/common/metrics/ngx_wa_histogram.h \ + $ngx_addon_dir/src/common/metrics/ngx_wa_metrics.h" NGX_WASMX_SRCS="\ $ngx_addon_dir/src/ngx_wasmx.c \ @@ -155,7 +158,9 @@ NGX_WASMX_SRCS="\ $ngx_addon_dir/src/common/proxy_wasm/ngx_proxy_wasm_util.c \ $ngx_addon_dir/src/common/shm/ngx_wasm_shm.c \ $ngx_addon_dir/src/common/shm/ngx_wasm_shm_kv.c \ - $ngx_addon_dir/src/common/shm/ngx_wasm_shm_queue.c" + $ngx_addon_dir/src/common/shm/ngx_wasm_shm_queue.c \ + $ngx_addon_dir/src/common/metrics/ngx_wa_histogram.c \ + $ngx_addon_dir/src/common/metrics/ngx_wa_metrics.c" # wasm diff --git a/docs/DIRECTIVES.md b/docs/DIRECTIVES.md index 950d17f56..fddec6121 100644 --- a/docs/DIRECTIVES.md +++ b/docs/DIRECTIVES.md @@ -6,6 +6,7 @@ By alphabetical order: - [cache_config](#cache-config) - [compiler](#compiler) - [flag](#flag) +- [max_metric_name_length](#max_metric_name_length) - [module](#module) - [proxy_wasm](#proxy_wasm) - [proxy_wasm_isolation](#proxy_wasm_isolation) @@ -16,6 +17,7 @@ By alphabetical order: - [resolver_timeout](#resolver_timeout) - [shm_kv](#shm_kv) - [shm_queue](#shm_queue) +- [slab_size](#slab_size) - [socket_buffer_size](#socket_buffer_size) - [socket_buffer_reuse](#socket_buffer_reuse) - [socket_connect_timeout](#socket_connect_timeout) @@ -57,6 +59,9 @@ By context: - [tls_trusted_certificate](#tls_trusted_certificate) - [tls_verify_cert](#tls_verify_cert) - [tls_verify_host](#tls_verify_host) + - `metrics{}` + - [max_metric_name_length](#max_metric_name_length) + - [slab_size](#slab_size) - `wasmtime{}` - [cache_config](#cache-config) - [flag](#flag) @@ -205,6 +210,24 @@ wasm { [Back to TOC](#directives) +max_metric_name_length +--------- + +**usage** | `max_metric_name_length ;` +------------:|:---------------------------------------------------------------- +**contexts** | `metrics{}` +**default** | `256` +**example** | `max_metric_name_length 512;` + +Set the maximum allowed length of a metric name. + +> Notes + +See [Metrics] for a complete description of how metrics are represented in +memory. + +[Back to TOC](#directives) + module ------ @@ -525,6 +548,33 @@ policy, and writes will fail when the allocated memory slab is full. 
[Back to TOC](#directives)
+slab_size
+---------
+
+**usage**    | `slab_size <size>;`
+------------:|:----------------------------------------------------------------
+**contexts** | `metrics{}`
+**default**  | `5m`
+**example**  | `slab_size 12m;`
+
+Set the `size` of the shared memory slab dedicated to metrics storage. The value
+must be at least 3 * pagesize, e.g. `12k` on Linux.
+
+> Notes
+
+The space in memory occupied by a metric depends on its name length, type and
+the number of worker processes running. As an example, if all metric names are
+64 chars long and 4 workers are running, `5m` can accommodate 20k counters, 20k
+gauges, or up to 16k histograms.
+
+See the [max_metric_name_length](#max_metric_name_length) directive to configure
+the max name length in chars for metrics.
+
+See [Metrics] for a complete description of how metrics are represented in
+memory.
+
+[Back to TOC](#directives)
+
 socket_buffer_reuse
 -------------------
@@ -939,7 +989,8 @@ the `http{}` contexts.
 [Contexts]: USER.md#contexts
 [Execution Chain]: USER.md#execution-chain
-[SLRU eviction algorithm]: SLRU.md
+[Metrics]: METRICS.md
 [OpenResty]: https://openresty.org/en/
 [resolver]: https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver
 [resolver_timeout]: https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_timeout
+[SLRU eviction algorithm]: SLRU.md
diff --git a/docs/METRICS.md b/docs/METRICS.md
new file mode 100644
index 000000000..e2bf68d23
--- /dev/null
+++ b/docs/METRICS.md
@@ -0,0 +1,97 @@
+# Metrics
+
+## Introduction
+
+In the context of ngx_wasm_module, in accordance with Proxy-Wasm, a metric is
+either a counter, a gauge or a histogram.
+
+A counter is an unsigned 64-bit int that can only be incremented.
+A gauge is an unsigned 64-bit int that can take arbitrary values.
+
+## Histograms
+
+A histogram represents the frequency distribution of a variable and can be
+defined as a set of pairs of range and counter. For example, the distribution of
+the response time of HTTP requests can be represented as a histogram with ranges
+`[0, 1]`, `(1, 2]`, `(2, 4]` and `(4, Inf]`. The 1st range's counter would be
+the number of requests with response time less than or equal to 1ms; the 2nd
+range's counter, requests with response time between 1ms and 2ms; the 3rd
+range's counter, requests with response time between 2ms and 4ms; and the last
+range's counter, requests with response time greater than 4ms.
+
+### Binning
+
+The above example demonstrates a histogram with ranges, or bins, whose upper
+bound grows in powers of 2, i.e. 2^0, 2^1 and 2^2. This is usually called
+logarithmic binning and is how histogram bins are represented in
+ngx_wasm_module. With this binning strategy, when a value `v` is recorded, it
+is matched with the smallest power of two that is greater than or equal to `v`;
+this value is the upper bound of the bin associated with `v`. If the histogram
+contains, or can contain, such a bin, its counter is incremented; if not, the
+bin with the smallest upper bound greater than `v` has its counter incremented.
+
+### Update and expansion
+
+Histograms are created with 5 bins, 1 initialized and 4 uninitialized. If a
+value `v` is recorded and its bin isn't part of the initialized bins, one of the
+uninitialized bins is initialized with the upper bound associated with `v` and
+its counter is incremented. If the histogram is out of uninitialized bins, it
+can be expanded, up to 18 bins, to accommodate the additional bin for `v`.
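+
+To make the bin mapping concrete, here is a minimal, standalone sketch of the
+power-of-two upper-bound calculation described in the Binning section above.
+The helper name is illustrative only and is not part of the module's API; the
+actual implementation is `bin_log2_upper_bound()` in
+`src/common/metrics/ngx_wa_histogram.c`.
+
+```c
+#include <stdint.h>
+#include <stdio.h>
+
+/* smallest power of two >= v, clamped to UINT32_MAX for very large values */
+static uint32_t
+bin_upper_bound(uint64_t v)
+{
+    uint32_t  ub = 1;
+
+    if (v > UINT32_MAX / 2) {
+        return UINT32_MAX;
+    }
+
+    while (ub < v) {
+        ub <<= 1;
+    }
+
+    return ub;
+}
+
+int
+main(void)
+{
+    /* prints: 1 1 16 16 32 */
+    printf("%u %u %u %u %u\n", bin_upper_bound(0), bin_upper_bound(1),
+           bin_upper_bound(10), bin_upper_bound(16), bin_upper_bound(17));
+    return 0;
+}
+```
+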
+The bin initialized upon histogram creation has upper bound 2^32 and its
+counter is incremented if it's the only bin whose upper bound is bigger than
+the recorded value.
+
+## Memory consumption
+
+The space in memory occupied by a metric comprises its name, its value and the
+underlying structure representing them in the key-value store. While the
+key-value structure has a fixed size of 96 bytes, the sizes of the name and
+value vary.
+
+The size in memory of the value of a counter or gauge is 8 bytes plus 16 bytes
+per worker process. The value size grows according to the number of workers
+because the metric value is segmented across them. Each worker has its own
+segment of the value to write updates to. When a metric is retrieved, the
+segments are consolidated and returned as a single metric. This storage
+strategy allows metric updates to be performed without the aid of locks at the
+cost of 16 bytes per worker.
+
+Histograms' values also have a baseline size of 8 bytes plus 16 bytes per
+worker. However, histograms need extra space per worker for bin storage. Bin
+storage costs 4 bytes plus 8 bytes per bin. So a 5-bin histogram takes 8 bytes
+plus (16 + 4 + 5*8) = 60 bytes per worker.
+
+As such, in a 4-worker setup, a counter or gauge whose name is 64 chars long
+takes 168 bytes, a 5-bin histogram with the same name takes 408 bytes, and an
+18-bin histogram with the same name takes 824 bytes.
+
+### Shared memory allocation
+
+Nginx employs an allocation model for shared memory that enforces allocation
+size to be a power of 2 and greater than 8; nonconforming values are rounded up,
+see [Nginx shared memory].
+
+This means that an allocation of 168 bytes, for instance, ends up taking 256
+bytes from the shared memory. This should be taken into account when estimating
+the space required for a group of metrics.
+
+### Prefixing
+
+The name of a metric is always prefixed with `pw.{filter_name}.` to avoid naming
+conflicts between Proxy-Wasm filters. This means that a metric named `a_counter`
+by the filter `a_filter` ends up named `pw.a_filter.a_counter`.
+The maximum length of a metric name, configured via `max_metric_name_length`,
+is enforced on the prefixed name and might need to be increased in some cases.
+
+## Nginx Reconfiguration
+
+If Nginx is reconfigured with a different number of workers or a different size
+for the metrics shared memory zone, existing metrics need to be reallocated into
+a brand new shared memory zone. This is due to the metric values being segmented
+across workers.
+
+As such, it's important to ensure that the new size of the metrics shared memory
+zone is enough to accommodate existing metrics and that the value of
+`max_metric_name_length` isn't less than the length of any existing metric name.
+
+[Nginx shared memory]: https://nginx.org/en/docs/dev/development_guide.html#shared_memory
diff --git a/docs/PROXY_WASM.md b/docs/PROXY_WASM.md
index 72495ea17..bcc05eae2 100644
--- a/docs/PROXY_WASM.md
+++ b/docs/PROXY_WASM.md
@@ -536,10 +536,10 @@ SDK ABI `0.2.1`) and their present status in ngx_wasm_module:
 `proxy_enqueue_shared_queue` | :heavy_check_mark: | No automatic eviction mechanism if the queue is full.
`proxy_resolve_shared_queue` | :x: |
 *Stats/metrics* | |
-`proxy_define_metric` | :x: |
-`proxy_get_metric` | :x: |
-`proxy_record_metric` | :x: |
-`proxy_increment_metric` | :x: |
+`proxy_define_metric` | :heavy_check_mark: |
+`proxy_get_metric` | :heavy_check_mark: |
+`proxy_record_metric` | :heavy_check_mark: |
+`proxy_increment_metric` | :heavy_check_mark: |
 *Custom extension points* | |
 `proxy_call_foreign_function` | :x: |
diff --git a/docs/adr/005-metrics.md b/docs/adr/005-metrics.md
new file mode 100644
index 000000000..41a9530b1
--- /dev/null
+++ b/docs/adr/005-metrics.md
@@ -0,0 +1,219 @@
+# Metrics
+
+* Status: proposed
+* Deciders: WasmX
+* Date: 2024-05-03
+
+## Table of Contents
+
+- [Problem Statement](#problem-statement)
+- [Technical Context](#technical-context)
+- [Decision Drivers](#decision-drivers)
+- [Proposal](#proposal)
+  - [Histograms](#histograms)
+    - [Binning](#binning)
+    - [Allocation and update handling](#allocation-and-update-handling)
+    - [Expansion](#expansion)
+
+## Problem Statement
+
+Support definition, update and retrieval of metrics from Proxy-Wasm filters,
+ngx_wasm_module itself and Lua land. How exactly should metrics be stored, and
+how should access to them be coordinated, so that two Nginx workers never write
+to the same memory space?
+
+[Back to TOC](#table-of-contents)
+
+## Technical Context
+
+A metric can be either a counter, a gauge or a histogram.
+A counter is an integer that can only be incremented.
+A gauge is an integer that can take arbitrary positive values.
+
+A histogram, used to represent the frequency distribution of a variable, can be
+defined as a set of pairs of range and counter. For example, the distribution of
+the response time of a group of HTTP requests can be represented as a histogram
+with ranges `[0, 10]`, `(10, 100]` and `(100, Inf]`. The 1st range's counter
+would be the number of requests whose response time <= 10ms; the 2nd range's
+counter, requests whose 10ms < response time <= 100ms; and the last range's
+counter, requests whose response time > 100ms.
+
+A metric's value should reflect updates from all worker processes. If a counter
+is `0` and is then incremented by workers 0 and 1, it should be `2` -- regardless
+of the worker it's retrieved from. A gauge, however, holds whatever value was
+last set by any of the workers. Histograms, like counters, account for values
+recorded by all workers.
+
+[Back to TOC](#table-of-contents)
+
+## Decision Drivers
+
+* Full Proxy-Wasm ABI compatibility
+* Build atop ngx_wasm_shm
+* Minimize memory usage
+* Minimize metrics access cost
+
+[Back to TOC](#table-of-contents)
+
+## Proposal
+
+The proposed scheme for metrics storage builds atop ngx_wasm_shm's key-value
+store. The metric name is stored as a key in a red-black tree node along with
+the metric value. The metric value is represented by `ngx_wa_metric_t`, see
+below. The member `type` is the metric type, while the flexible array member
+`slots` stores the actual metric data.
+
+The length of `slots` equals the number of worker processes running when the
+metric is defined. This ensures each worker has its own dedicated slot to write
+metric updates to.
+
+For counters, each entry in the `slots` array is simply an unsigned integer that
+its assigned worker increments. When a counter is retrieved, the values in the
+`slots` array are summed and returned.
+
+For gauges, each of the `slots` is a pair of an unsigned integer and a
+timestamp. When a worker sets a gauge, the value is stored in its slot along
+with the update time.
When a gauge is retrieved, the values in the +`slots` are iterated and the most recent value is returned. + +For histograms, each of the `slots` points to a `ngx_wa_metrics_histogram_t` +instance. Each worker updates the histogram pointed to by its slot. When a +histogram is retrieved, the `slots` array is iterated and each worker's +histogram is merged into a temporary histogram, which can then be serialized. + +```c +typedef enum { + NGX_WA_METRIC_COUNTER, + NGX_WA_METRIC_GAUGE, + NGX_WA_METRIC_HISTOGRAM, +} ngx_wa_metric_type_e; + +typedef struct { + ngx_uint_t value; + ngx_msec_t last_update; +} ngx_wa_metrics_gauge_t; + + +typedef struct { + uint32_t upper_bound; + uint32_t count; +} ngx_wa_metrics_bin_t; + +typedef struct { + uint8_t n_bins; + ngx_wa_metrics_bin_t bins[]; +} ngx_wa_metrics_histogram_t; + + +typedef union { + ngx_uint_t counter; + ngx_wa_metrics_gauge_t gauge; + ngx_wa_metrics_histogram_t *histogram; +} ngx_wa_metric_val_t; + +typedef struct { + ngx_wa_metric_type_e type; + ngx_wa_metric_val_t slots[]; +} ngx_wa_metric_t; +``` + +This storage strategy ensures that two workers **never** write to the same +memory address when updating a metric as long as no memory allocation is +performed. This is indeed the case for counters and gauges and it's also the +case for most histogram updates. + +This is an important feature of this design as it allows the more frequent +update operations to be performed without the aid of locks. The cost of a +lock-less metric update then becomes merely the cost of searching the red-black +tree of the underlying key-value store, O(logn). + +The capacity of updating a metric without having to acquire a lock is +particularly attractive when a set of worker processes is under heavy load. In +such conditions, lock contention is likely to impact proxy throughput as workers +are more likely to wait for a lock to be released before proceeding with its +metric update and resume its workload. + +Metric definition and removal still require locks to be safely performed as two +workers might end up attempting to write to the same memory location. This is +also true for histogram updates which cause them to grow in number of `bins`. + +The ABI proposed to accomplish the described system closely resembles the one +from Proxy-Wasm specification itself: + +```c +ngx_int_t ngx_wa_metrics_add(ngx_wa_metrics_t *metrics, ngx_str_t *name, + ngx_wa_metric_type_e type, uint32_t *out); +ngx_int_t ngx_wa_metrics_get(ngx_wa_metrics_t *metrics, uint32_t metric_id, + ngx_uint_t *out); +ngx_int_t ngx_wa_metrics_increment(ngx_wa_metrics_t *metrics, + uint32_t metric_id, ngx_int_t val); +ngx_int_t ngx_wa_metrics_record(ngx_wa_metrics_t *metrics, uint32_t metric_id, + ngx_int_t val); +``` + +[Back to TOC](#table-of-contents) + +### Histograms + +This proposal includes a scheme composed of `ngx_wa_metrics_bin_t`, a pair of +upper bound and counter, and `ngx_wa_metrics_histogram_t`, a list of +`ngx_wa_metrics_bin_t` ordered by upper bound, to represent histogram data in +memory. A bin's counter is the number of recorded values less than or equal to +its upper bound and bigger than the previous bin's upper bound. + +This storage layout can represent both histograms with user-defined bins and +those following an automatic binning strategy, like logarithmic binning. This +document will focus, however, on logarithmic binning; user-defined bins are left +for a future iteration. 
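+
+As a condensed illustration of the per-slot consolidation described earlier in
+this proposal, the sketch below shows the read path for counters and gauges. It
+assumes the `ngx_wa_metric_t` definitions above and Nginx's
+`ngx_uint_t`/`ngx_msec_t` typedefs; the actual implementation is found in
+`counter_get()` and `gauge_get()` in `src/common/metrics/ngx_wa_metrics.c`.
+
+```c
+/* a counter is the sum of every worker's private slot */
+static ngx_uint_t
+counter_read(ngx_wa_metric_t *m, ngx_uint_t nworkers)
+{
+    ngx_uint_t  i, sum = 0;
+
+    for (i = 0; i < nworkers; i++) {
+        sum += m->slots[i].counter;
+    }
+
+    return sum;
+}
+
+
+/* a gauge is the value of the most recently updated slot */
+static ngx_uint_t
+gauge_read(ngx_wa_metric_t *m, ngx_uint_t nworkers)
+{
+    ngx_uint_t  i, val = m->slots[0].gauge.value;
+    ngx_msec_t  last = m->slots[0].gauge.last_update;
+
+    for (i = 1; i < nworkers; i++) {
+        if (m->slots[i].gauge.last_update > last) {
+            last = m->slots[i].gauge.last_update;
+            val = m->slots[i].gauge.value;
+        }
+    }
+
+    return val;
+}
+```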
+ +[Back to TOC](#table-of-contents) + +#### Binning + +The proposed binning strategy assumes the domain of the variables being measured +is the set of nonnegative integers and divides this domain into bins whose upper +bound grows in powers of 2, i.e., 1, 2, 4, 8, 16, etc. The mapping of a value +`v` to its bin is given by the function `pow(2, ceil(log2(v)))` which calculates +the bin's upper bound. The value 10, for example, is mapped to the bin whose +upper bound is `pow(2, ceil(log2(10)))`, or 16. The bin with upper bound `16` +represents recorded values between `8` and `16`. + +This logarithmic scaling provides good enough resolution for small values in +return for low resolution for large values while keeping the memory footprint +reasonably low: values up to 65,536 can be represented with only 16 bins. These +characteristics fit the typical use case of measuring HTTP response time in +milliseconds. + +[Back to TOC](#table-of-contents) + +#### Allocation and update handling + +Histograms are created with enough space for 5 bins, one of which is initialized +with NGX_MAX_UINT32_VALUE as upper bound, leaving 4 uninitialized. + +If a value v is recorded into a histogram and its respective bin is part of the +histogram's bins, its counter is simply incremented. If not, and there's at +least one uninitialized bin, then one bin is initialized with v's upper bound, +the bins are rearranged to ensure ascending order with respect to upper bound, +and the new bin's counter is finally incremented. + +[Back to TOC](#table-of-contents) + +#### Expansion + +If a value v is recorded but its bin isn't part of the histogram's bins and +there aren't any uninitialized bins left, the histogram needs to grow to +accommodate the new value's bin. + +Expanding a histogram means allocating memory for a new histogram instance with +enough space for the additional bin, copying memory from the old instance to +the new one and finally releasing the old histogram's memory. The new +uninitialized bin is then initialized with v's upper bound and its counter is +incremented. + +Histograms, however, can only grow up to a maximum number of bins. When a value +`v` is recorded into a histogram, but its bin isn't part of the bins and the +histogram's reached the bin limit, the bin with the smallest upper bound bigger +than `v` is incremented. 
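+
+The size arithmetic of that grow step follows directly from the structures
+above. Below is a simplified, illustrative sketch; the real implementation
+(`histogram_grow()` in `src/common/metrics/ngx_wa_histogram.c`) allocates from
+the shared slab pool under the shm lock rather than calling `calloc()` and
+`free()`, and the limits shown are the module's defaults.
+
+```c
+#include <stdlib.h>
+#include <string.h>
+
+#define MAX_BINS        18
+#define BINS_INCREMENT   4
+
+static ngx_wa_metrics_histogram_t *
+histogram_grow_sketch(ngx_wa_metrics_histogram_t *h)
+{
+    size_t                       n, old_size, new_size;
+    ngx_wa_metrics_histogram_t  *new_h;
+
+    if (h->n_bins >= MAX_BINS) {
+        return NULL;              /* at capacity: the caller records into the
+                                     next closest bin instead */
+    }
+
+    n = MAX_BINS - h->n_bins;
+    if (n > BINS_INCREMENT) {
+        n = BINS_INCREMENT;
+    }
+
+    old_size = sizeof(ngx_wa_metrics_histogram_t)
+               + sizeof(ngx_wa_metrics_bin_t) * h->n_bins;
+    new_size = old_size + sizeof(ngx_wa_metrics_bin_t) * n;
+
+    new_h = calloc(1, new_size);
+    if (new_h == NULL) {
+        return NULL;
+    }
+
+    memcpy(new_h, h, old_size);   /* carry over the existing bins */
+    new_h->n_bins += n;           /* the extra bins start zeroed, i.e.
+                                     uninitialized */
+    free(h);
+
+    return new_h;
+}
+```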
+ +[Back to TOC](#table-of-contents) diff --git a/src/common/debug/ngx_wasm_debug_module.c b/src/common/debug/ngx_wasm_debug_module.c index ba78c4aa7..3aee09ecd 100644 --- a/src/common/debug/ngx_wasm_debug_module.c +++ b/src/common/debug/ngx_wasm_debug_module.c @@ -9,6 +9,8 @@ #include #endif +#include + #if (!NGX_DEBUG) # error ngx_wasm_debug_module included in a non-debug build #endif @@ -20,6 +22,11 @@ static ngx_int_t ngx_wasm_debug_init(ngx_cycle_t *cycle) { + size_t long_metric_name_len = NGX_MAX_ERROR_STR; + uint32_t mid; + ngx_str_t metric_name; + u_char buf[long_metric_name_len]; + static ngx_wasm_phase_t ngx_wasm_debug_phases[] = { { ngx_string("a_phase"), 0, 0, 0 }, { ngx_null_string, 0, 0, 0 } @@ -41,6 +48,25 @@ ngx_wasm_debug_init(ngx_cycle_t *cycle) ngx_wasm_phase_lookup(&ngx_wasm_debug_subsystem, 3) == NULL ); + metric_name.len = long_metric_name_len; + metric_name.data = buf; + + /* invalid metric name length */ + ngx_wa_assert( + ngx_wa_metrics_add(ngx_wasmx_metrics(cycle), + &metric_name, + NGX_WA_METRIC_COUNTER, + &mid) == NGX_ERROR + ); + + /* invalid metric type */ + ngx_wa_assert( + ngx_wa_metrics_add(ngx_wasmx_metrics(cycle), + &metric_name, + 100, + &mid) == NGX_ERROR + ); + return NGX_OK; } diff --git a/src/common/metrics/ngx_wa_histogram.c b/src/common/metrics/ngx_wa_histogram.c new file mode 100644 index 000000000..2296554a7 --- /dev/null +++ b/src/common/metrics/ngx_wa_histogram.c @@ -0,0 +1,234 @@ +#ifndef DDEBUG +#define DDEBUG 0 +#endif +#include "ddebug.h" + +#include +#include + +#define NGX_WA_INITIAL_BINS 5 +#define NGX_WA_MAX_BINS 18 +#define NGX_WA_BINS_INCREMENT 4 + + +static uint32_t +bin_log2_upper_bound(ngx_uint_t n) +{ + uint32_t upper_bound = 2; + + if (n <= 1) { + return 1; + } + + if (n > NGX_MAX_UINT32_VALUE / 2) { + return NGX_MAX_UINT32_VALUE; + } + + for (n = n - 1; n >>= 1; upper_bound <<= 1) { /* void */ } + + return upper_bound; +} + + +static ngx_int_t +histogram_grow(ngx_wa_metrics_t *metrics, ngx_wa_metrics_histogram_t *h, + ngx_wa_metrics_histogram_t **out) +{ + size_t old_size, size; + ngx_int_t rc = NGX_OK; + ngx_uint_t n; + ngx_wa_metrics_histogram_t *new_h = NULL; + + if (h->n_bins == NGX_WA_MAX_BINS) { + return NGX_ERROR; + } + + ngx_log_debug(NGX_LOG_DEBUG_WASM, metrics->shm->log, 0, + "growing histogram"); + + n = ngx_min(NGX_WA_BINS_INCREMENT, NGX_WA_MAX_BINS - h->n_bins); + old_size = sizeof(ngx_wa_metrics_histogram_t) + + sizeof(ngx_wa_metrics_bin_t) * h->n_bins; + size = old_size + sizeof(ngx_wa_metrics_bin_t) * n; + + if (metrics->shm->eviction == NGX_WASM_SHM_EVICTION_NONE) { + ngx_wasm_shm_lock(metrics->shm); + } + + new_h = ngx_slab_calloc_locked(metrics->shm->shpool, size); + if (new_h == NULL) { + ngx_log_debug(NGX_LOG_DEBUG_WASM, metrics->shm->log, 0, + "cannot expand histogram"); + rc = NGX_ERROR; + goto error; + } + + ngx_memcpy(new_h, h, old_size); + ngx_slab_free_locked(metrics->shm->shpool, h); + + new_h->n_bins += n; + *out = new_h; + +error: + + if (metrics->shm->eviction == NGX_WASM_SHM_EVICTION_NONE) { + ngx_wasm_shm_unlock(metrics->shm); + } + + return rc; +} + + +static ngx_wa_metrics_bin_t * +histogram_bin(ngx_wa_metrics_t *metrics, ngx_wa_metrics_histogram_t *h, + ngx_uint_t n, ngx_wa_metrics_histogram_t **out) +{ + size_t i, j = 0; + uint32_t ub = bin_log2_upper_bound(n); + ngx_wa_metrics_bin_t *b; + + for (i = 0; i < h->n_bins; i++) { + b = &h->bins[i]; + j = (j == 0 && ub < b->upper_bound) ? 
i : j; + + if (b->upper_bound == ub) { + return b; + + } else if (b->upper_bound == 0) { + break; + } + } + + if (i == h->n_bins) { + if (out && histogram_grow(metrics, h, out) == NGX_OK) { + h = *out; + + } else { + ngx_wasm_log_error(NGX_LOG_INFO, metrics->shm->log, 0, + "cannot add a new histogram bin for value " + "\"%uD\", returning next closest bin", n); + return &h->bins[j]; + } + } + + /* shift bins to create space for the new one */ + ngx_memcpy(&h->bins[j + 1], &h->bins[j], + sizeof(ngx_wa_metrics_bin_t) * (i - j)); + + h->bins[j].upper_bound = ub; + h->bins[j].count = 0; + + return &h->bins[j]; +} + + +void +ngx_wa_metrics_histogram_get(ngx_wa_metrics_t *metrics, ngx_wa_metric_t *m, + ngx_uint_t slots, ngx_wa_metrics_histogram_t *out) +{ + size_t i, j = 0; + ngx_wa_metrics_bin_t *b, *out_b; + ngx_wa_metrics_histogram_t *h; + + for (i = 0; i < slots; i++) { + h = m->slots[i].histogram; + + for (j = 0; j < h->n_bins; j++) { + b = &h->bins[j]; + if (b->upper_bound == 0) { + break; + } + + out_b = histogram_bin(metrics, out, b->upper_bound, NULL); + out_b->count += b->count; + } + } +} + +#if (NGX_DEBUG) +static void +histogram_log(ngx_wa_metrics_t *metrics, ngx_wa_metric_t *m, uint32_t mid) +{ + size_t i, size = sizeof(ngx_wa_metrics_histogram_t) + + sizeof(ngx_wa_metrics_bin_t) + * NGX_WA_MAX_BINS; + ngx_wa_metrics_bin_t *b; + ngx_wa_metrics_histogram_t *h; + u_char *p, buf[size], s_buf[NGX_MAX_ERROR_STR]; + + ngx_memzero(buf, size); + + p = s_buf; + h = (ngx_wa_metrics_histogram_t *) buf; + h->n_bins = NGX_WA_MAX_BINS; + h->bins[0].upper_bound = NGX_MAX_UINT32_VALUE; + + ngx_wa_metrics_histogram_get(metrics, m, metrics->workers, h); + + for (i = 0; i < h->n_bins; i++) { + b = &h->bins[i]; + if (b->upper_bound == 0) { + break; + } + + p = ngx_sprintf(p, " %uD: %uD;", b->upper_bound, b->count); + } + + ngx_log_debug3(NGX_LOG_DEBUG_WASM, metrics->shm->log, 0, + "histogram \"%uD\": %*s", mid, p - s_buf - 1, s_buf + 1); +} +#endif + +ngx_int_t +ngx_wa_metrics_histogram_add_locked(ngx_wa_metrics_t *metrics, + ngx_wa_metric_t *m) +{ + size_t i; + uint16_t n_bins = NGX_WA_INITIAL_BINS; + ngx_wa_metrics_histogram_t **h; + + for (i = 0; i < metrics->workers; i++) { + h = &m->slots[i].histogram; + *h = ngx_slab_calloc_locked(metrics->shm->shpool, + sizeof(ngx_wa_metrics_histogram_t) + + sizeof(ngx_wa_metrics_bin_t) * n_bins); + if (*h == NULL) { + goto error; + } + + (*h)->n_bins = n_bins; + (*h)->bins[0].upper_bound = NGX_MAX_UINT32_VALUE; + } + + return NGX_OK; + +error: + + ngx_wasm_log_error(NGX_LOG_ERR, metrics->shm->log, 0, + "cannot allocate histogram"); + + for (/* void */ ; i > 0; i--) { + ngx_slab_free_locked(metrics->shm->shpool, m->slots[i - 1].histogram); + } + + return NGX_ERROR; +} + + +ngx_int_t +ngx_wa_metrics_histogram_record(ngx_wa_metrics_t *metrics, ngx_wa_metric_t *m, + ngx_uint_t slot, uint32_t mid, ngx_uint_t n) +{ + ngx_wa_metrics_bin_t *b; + ngx_wa_metrics_histogram_t *h; + + h = m->slots[slot].histogram; + b = histogram_bin(metrics, h, n, &m->slots[slot].histogram); + b->count += 1; + +#if (NGX_DEBUG) + histogram_log(metrics, m, mid); +#endif + + return NGX_OK; +} diff --git a/src/common/metrics/ngx_wa_histogram.h b/src/common/metrics/ngx_wa_histogram.h new file mode 100644 index 000000000..854b1ad57 --- /dev/null +++ b/src/common/metrics/ngx_wa_histogram.h @@ -0,0 +1,16 @@ +#ifndef _NGX_WA_HISTOGRAM_H_INCLUDED_ +#define _NGX_WA_HISTOGRAM_H_INCLUDED_ + + +#include + + +ngx_int_t ngx_wa_metrics_histogram_add_locked(ngx_wa_metrics_t *metrics, + ngx_wa_metric_t *m); 
+ngx_int_t ngx_wa_metrics_histogram_record(ngx_wa_metrics_t *metrics, + ngx_wa_metric_t *m, ngx_uint_t slot, uint32_t mid, ngx_uint_t n); +void ngx_wa_metrics_histogram_get(ngx_wa_metrics_t *metrics, ngx_wa_metric_t *m, + ngx_uint_t slots, ngx_wa_metrics_histogram_t *out); + + +#endif /* _NGX_WA_HISTOGRAM_H_INCLUDED_ */ diff --git a/src/common/metrics/ngx_wa_metrics.c b/src/common/metrics/ngx_wa_metrics.c new file mode 100644 index 000000000..f9296571d --- /dev/null +++ b/src/common/metrics/ngx_wa_metrics.c @@ -0,0 +1,490 @@ +#ifndef DDEBUG +#define DDEBUG 0 +#endif +#include "ddebug.h" + +#include +#include +#include + +#define NGX_WA_DEFAULT_METRIC_NAME_LEN 256 +#define NGX_WA_DEFAULT_METRICS_SLAB_SIZE 1024 * 1024 * 5 + + +static ngx_str_t * +metric_type_name(ngx_wa_metric_type_e type) +{ + static ngx_str_t counter = ngx_string("counter"); + static ngx_str_t gauge = ngx_string("gauge"); + static ngx_str_t histogram = ngx_string("histogram"); + static ngx_str_t unknown = ngx_string("unknown"); + + switch (type) { + case NGX_WA_METRIC_COUNTER: + return &counter; + + case NGX_WA_METRIC_GAUGE: + return &gauge; + + case NGX_WA_METRIC_HISTOGRAM: + return &histogram; + + default: + return &unknown; + } +} + + +static ngx_uint_t +counter_get(ngx_wa_metric_t *m, ngx_uint_t slots) +{ + ngx_uint_t i, val = 0; + + for (i = 0; i < slots; i++) { + val += m->slots[i].counter; + } + + return val; +} + + +static ngx_uint_t +gauge_get(ngx_wa_metric_t *m, ngx_uint_t slots) +{ + ngx_msec_t l; + ngx_uint_t i, val = 0; + + val = m->slots[0].gauge.value; + l = m->slots[0].gauge.last_update; + + for (i = 1; i < slots; i++) { + if (m->slots[i].gauge.last_update > l) { + val = m->slots[i].gauge.value; + l = m->slots[i].gauge.last_update; + } + } + + return val; +} + + +static ngx_int_t +histogram_reallocate(ngx_wa_metrics_t *metrics, ngx_wa_metric_t *old_m, + uint32_t mid) +{ + uint32_t cas, slots = metrics->old_metrics->workers; + ngx_int_t rc; + ngx_str_t *val; + ngx_wa_metric_t *m; + + rc = ngx_wasm_shm_kv_get_locked(metrics->shm, NULL, &mid, &val, &cas); + if (rc != NGX_OK) { + return rc; + } + + m = (ngx_wa_metric_t *) val->data; + + ngx_wa_metrics_histogram_get(metrics, old_m, slots, m->slots[0].histogram); + + return NGX_OK; +} + + +static ngx_int_t +metrics_reallocate(ngx_wa_metrics_t *metrics, ngx_rbtree_node_t *node, + ngx_rbtree_node_t *sentinel) +{ + uint32_t mid; + ngx_int_t rc; + ngx_uint_t val; + ngx_wasm_shm_kv_node_t *n = (ngx_wasm_shm_kv_node_t *) node; + ngx_wa_metric_t *m = (ngx_wa_metric_t *) n->value.data; + + if (node == sentinel) { + return NGX_OK; + } + + ngx_log_debug1(NGX_LOG_DEBUG_WASM, metrics->shm->log, 0, + "reallocating metric \"%V\"", &n->key.str); + + if (ngx_wa_metrics_add(metrics, &n->key.str, m->type, &mid) != NGX_OK) { + ngx_wasm_log_error(NGX_LOG_ERR, metrics->shm->log, 0, + "failed redefining metric \"%V\"", &n->key.str); + + return NGX_ERROR; + } + + switch (m->type) { + case NGX_WA_METRIC_COUNTER: + val = counter_get(m, metrics->old_metrics->workers); + rc = ngx_wa_metrics_increment(metrics, mid, val); + break; + + case NGX_WA_METRIC_GAUGE: + val = gauge_get(m, metrics->old_metrics->workers); + rc = ngx_wa_metrics_record(metrics, mid, val); + break; + + case NGX_WA_METRIC_HISTOGRAM: + rc = histogram_reallocate(metrics, m, mid); + break; + + default: + ngx_wa_assert(0); + return NGX_ERROR; + } + + if (rc != NGX_OK) { + ngx_wasm_log_error(NGX_LOG_ERR, metrics->shm->log, 0, + "failed updating metric \"%V\"", &n->key.str); + return NGX_ERROR; + } + + if (node->left + && 
metrics_reallocate(metrics, node->left, sentinel) != NGX_OK) + { + return NGX_ERROR; + } + + if (node->right + && metrics_reallocate(metrics, node->right, sentinel) != NGX_OK) + { + return NGX_ERROR; + } + + return NGX_OK; +} + + +ngx_wa_metrics_t * +ngx_wa_metrics_alloc(ngx_cycle_t *cycle) +{ + static ngx_str_t shm_name = ngx_string("metrics"); + ngx_wa_metrics_t *metrics; + + metrics = ngx_pcalloc(cycle->pool, sizeof(ngx_wa_metrics_t)); + if (metrics == NULL) { + ngx_wasm_log_error(NGX_LOG_ERR, cycle->log, 0, + "failed allocating metrics structure"); + + return NULL; + } + + metrics->old_metrics = ngx_wasmx_metrics(cycle->old_cycle); + metrics->config.slab_size = NGX_CONF_UNSET_SIZE; + metrics->config.max_metric_name_length = NGX_CONF_UNSET_SIZE; + + metrics->shm = ngx_pcalloc(cycle->pool, sizeof(ngx_wasm_shm_t)); + if (metrics->shm == NULL) { + ngx_wasm_log_error(NGX_LOG_ERR, cycle->log, 0, + "failed allocating metrics shm structure"); + ngx_pfree(cycle->pool, metrics); + + return NULL; + } + + metrics->shm->log = &cycle->new_log; + metrics->shm->name = shm_name; + metrics->shm->type = NGX_WASM_SHM_TYPE_METRICS; + metrics->shm->eviction = NGX_WASM_SHM_EVICTION_NONE; + metrics->shm->data = NULL; + + return metrics; +} + + +ngx_int_t +ngx_wa_metrics_init_conf(ngx_wa_metrics_t *metrics, ngx_conf_t *cf) +{ + ngx_cycle_t *cycle = cf->cycle; + ngx_core_conf_t *ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, + ngx_core_module); + ngx_wa_metrics_t *old_metrics = metrics->old_metrics;; + + if (metrics->config.slab_size == NGX_CONF_UNSET_SIZE) { + metrics->config.slab_size = NGX_WA_DEFAULT_METRICS_SLAB_SIZE; + } + + if (metrics->config.max_metric_name_length == NGX_CONF_UNSET_SIZE) { + metrics->config.max_metric_name_length = NGX_WA_DEFAULT_METRIC_NAME_LEN; + } + + /* TODO: if eviction is enabled, metrics->workers must be set to 1 */ + metrics->workers = ccf->worker_processes; + metrics->shm_zone = ngx_shared_memory_add(cf, &metrics->shm->name, + metrics->config.slab_size, + &ngx_wasmx_module); + if (metrics->shm_zone == NULL) { + return NGX_ERROR; + } + + metrics->shm_zone->data = metrics->shm; + metrics->shm_zone->init = ngx_wasm_shm_init_zone; + metrics->shm_zone->noreuse = 0; + + if (old_metrics + && (metrics->workers != old_metrics->workers + || metrics->config.slab_size != old_metrics->config.slab_size)) + { + metrics->shm_zone->noreuse = 1; + } + + return NGX_OK; +} + + +ngx_int_t +ngx_wa_metrics_init(ngx_wa_metrics_t *metrics, ngx_cycle_t *cycle) +{ + ngx_int_t rc; + ngx_wasm_shm_kv_t *old_shm_kv; + + if (metrics->old_metrics && !metrics->shm_zone->noreuse) { + /* reuse old kv store */ + metrics->shm->data = metrics->old_metrics->shm->data; + + return NGX_OK; + } + + rc = ngx_wasm_shm_kv_init(metrics->shm); + if (rc != NGX_OK) { + return rc; + } + + if (metrics->old_metrics && metrics->shm_zone->noreuse) { + old_shm_kv = ngx_wasm_shm_get_kv(metrics->old_metrics->shm); + + return metrics_reallocate(metrics, old_shm_kv->rbtree.root, + old_shm_kv->rbtree.sentinel); + } + + return NGX_OK; +} + + +ngx_int_t +ngx_wa_metrics_add(ngx_wa_metrics_t *metrics, ngx_str_t *name, + ngx_wa_metric_type_e type, uint32_t *out) +{ + ssize_t size = sizeof(ngx_wa_metric_t) + + sizeof(ngx_wa_metric_val_t) * metrics->workers; + uint32_t cas, mid; + ngx_int_t rc, written; + ngx_str_t *p, val; + ngx_wa_metric_t *m; + u_char buf[size]; + + if (type != NGX_WA_METRIC_COUNTER + && type != NGX_WA_METRIC_GAUGE + && type != NGX_WA_METRIC_HISTOGRAM) + { + return NGX_ERROR; + } + + if (name->len > 
metrics->config.max_metric_name_length) { + return NGX_ERROR; + } + + mid = ngx_crc32_long(name->data, name->len); + + ngx_wasm_shm_lock(metrics->shm); + + rc = ngx_wasm_shm_kv_get_locked(metrics->shm, NULL, &mid, &p, &cas); + if (rc == NGX_OK) { + goto done; + } + + dd("adding new metric"); + + ngx_memzero(buf, size); + m = (ngx_wa_metric_t *) buf; + m->type = type; + + if (type == NGX_WA_METRIC_HISTOGRAM) { + rc = ngx_wa_metrics_histogram_add_locked(metrics, m); + if (rc != NGX_OK) { + goto error; + } + } + + val.len = size; + val.data = buf; + + rc = ngx_wasm_shm_kv_set_locked(metrics->shm, name, &val, 0, &written); + + if (rc != NGX_OK) { + goto error; + } + +done: + + *out = mid; + +error: + + ngx_wasm_shm_unlock(metrics->shm); + + if (rc == NGX_OK) { + ngx_wasm_log_error(NGX_LOG_INFO, metrics->shm->log, 0, + "defined %V \"%V\" with id %uD", + metric_type_name(type), name, mid); + } + + return rc; +} + + +ngx_int_t +ngx_wa_metrics_get(ngx_wa_metrics_t *metrics, uint32_t mid, ngx_uint_t *out) +{ + uint32_t cas; + ngx_int_t rc; + ngx_str_t *n; + ngx_wa_metric_t *m; + + rc = ngx_wasm_shm_kv_get_locked(metrics->shm, NULL, &mid, &n, &cas); + if (rc != NGX_OK) { + ngx_wasm_log_error(NGX_LOG_ERR, metrics->shm->log, 0, + "metric \"%uD\" not found", mid); + return rc; + } + + m = (ngx_wa_metric_t *) n->data; + + switch (m->type) { + case NGX_WA_METRIC_COUNTER: + *out = counter_get(m, metrics->workers); + break; + + case NGX_WA_METRIC_GAUGE: + *out = gauge_get(m, metrics->workers); + break; + + default: + ngx_wa_assert(0); + return NGX_ERROR; + } + + ngx_log_debug2(NGX_LOG_DEBUG_WASM, metrics->shm->log, 0, + "wasm retrieving metric \"%z\" as %d", mid, *out); + + return NGX_OK; +} + + +ngx_int_t +ngx_wa_metrics_increment(ngx_wa_metrics_t *metrics, uint32_t mid, ngx_int_t n) +{ + uint32_t cas; + ngx_int_t rc = NGX_OK; + ngx_str_t *val; + ngx_uint_t slot; + ngx_wa_metric_t *m; + + slot = (ngx_process == NGX_PROCESS_WORKER) ? ngx_worker : 0; + +#if 0 + if (metrics->shm->eviction != NGX_WASM_EVICTION_NONE) { + slot = 0; + ngx_wasm_shm_lock(metrics->shm); + } +#endif + + rc = ngx_wasm_shm_kv_get_locked(metrics->shm, NULL, &mid, &val, &cas); + if (rc != NGX_OK) { + ngx_wasm_log_error(NGX_LOG_ERR, metrics->shm->log, 0, + "metric \"%uD\" not found", mid); + goto error; + } + + m = (ngx_wa_metric_t *) val->data; + + if (m->type != NGX_WA_METRIC_COUNTER) { + ngx_wasm_log_error(NGX_LOG_ERR, metrics->shm->log, 0, + "attempt to call increment_metric on a %V; " + "operation not supported", + metric_type_name(m->type)); + rc = NGX_ERROR; + goto error; + } + + ngx_log_debug2(NGX_LOG_DEBUG_WASM, metrics->shm->log, 0, + "wasm updating metric \"%uD\" with %d", mid, n); + + m->slots[slot].counter += n; + +error: + +#if 0 + if (metrics->shm->eviction != NGX_WASM_EVICTION_NONE) { + ngx_wasm_shm_unlock(metrics->shm); + } +#endif + + return rc; +} + + +ngx_int_t +ngx_wa_metrics_record(ngx_wa_metrics_t *metrics, uint32_t mid, ngx_int_t n) +{ + uint32_t cas; + ngx_int_t rc = NGX_OK; + ngx_str_t *val; + ngx_uint_t slot; + ngx_wa_metric_t *m; + + slot = (ngx_process == NGX_PROCESS_WORKER) ? 
ngx_worker : 0; + +#if 0 + if (metrics->shm->eviction != NGX_WASM_EVICTION_NONE) { + slot = 0; + ngx_wasm_shm_lock(metrics->shm); + } +#endif + + rc = ngx_wasm_shm_kv_get_locked(metrics->shm, NULL, &mid, &val, &cas); + if (rc != NGX_OK) { + ngx_wasm_log_error(NGX_LOG_ERR, metrics->shm->log, 0, + "metric \"%uD\" not found", mid); + + goto error; + } + + ngx_log_debug2(NGX_LOG_DEBUG_WASM, metrics->shm->log, 0, + "wasm updating metric \"%uD\" with %d", mid, n); + + m = (ngx_wa_metric_t *) val->data; + + switch (m->type) { + case NGX_WA_METRIC_GAUGE: + m->slots[slot].gauge.value = n; + m->slots[slot].gauge.last_update = ngx_current_msec; + + break; + + case NGX_WA_METRIC_HISTOGRAM: + ngx_wa_metrics_histogram_record(metrics, m, slot, mid, n); + + break; + + default: + ngx_wasm_log_error(NGX_LOG_ERR, metrics->shm->log, 0, + "attempt to call record_metric on a counter; " + "operation not supported"); + rc = NGX_ERROR; + goto error; + + break; + } + +error: + +#if 0 + if (metrics->shm->eviction != NGX_WASM_EVICTION_NONE) { + ngx_wasm_shm_unlock(metrics->shm); + } +#endif + + return rc; +} diff --git a/src/common/metrics/ngx_wa_metrics.h b/src/common/metrics/ngx_wa_metrics.h new file mode 100644 index 000000000..d59ebe49b --- /dev/null +++ b/src/common/metrics/ngx_wa_metrics.h @@ -0,0 +1,79 @@ +#ifndef _NGX_WA_METRICS_H_INCLUDED_ +#define _NGX_WA_METRICS_H_INCLUDED_ + + +#include + + +typedef struct ngx_wa_metrics_s ngx_wa_metrics_t; + + +typedef struct { + size_t slab_size; + size_t max_metric_name_length; +} ngx_wa_metrics_conf_t; + + +typedef enum { + NGX_WA_METRIC_COUNTER, + NGX_WA_METRIC_GAUGE, + NGX_WA_METRIC_HISTOGRAM, +} ngx_wa_metric_type_e; + + +typedef struct { + ngx_uint_t value; + ngx_msec_t last_update; +} ngx_wa_metrics_gauge_t; + + +typedef struct { + uint32_t upper_bound; + uint32_t count; +} ngx_wa_metrics_bin_t; + + +typedef struct { + uint8_t n_bins; + ngx_wa_metrics_bin_t bins[]; +} ngx_wa_metrics_histogram_t; + + +typedef union { + ngx_uint_t counter; + ngx_wa_metrics_gauge_t gauge; + ngx_wa_metrics_histogram_t *histogram; +} ngx_wa_metric_val_t; + + +typedef struct { + ngx_wa_metric_type_e type; + ngx_wa_metric_val_t slots[]; +} ngx_wa_metric_t; + + +struct ngx_wa_metrics_s { + ngx_uint_t workers; + ngx_shm_zone_t *shm_zone; + ngx_wasm_shm_t *shm; + ngx_wa_metrics_t *old_metrics; + ngx_wa_metrics_conf_t config; +}; + + +ngx_wa_metrics_t *ngx_wasmx_metrics(ngx_cycle_t *cycle); + +ngx_wa_metrics_t *ngx_wa_metrics_alloc(ngx_cycle_t *cycle); +ngx_int_t ngx_wa_metrics_init_conf(ngx_wa_metrics_t *metrics, ngx_conf_t *cf); +ngx_int_t ngx_wa_metrics_init(ngx_wa_metrics_t *metrics, ngx_cycle_t *cycle); +ngx_int_t ngx_wa_metrics_add(ngx_wa_metrics_t *metrics, ngx_str_t *name, + ngx_wa_metric_type_e type, uint32_t *out); +ngx_int_t ngx_wa_metrics_get(ngx_wa_metrics_t *metrics, uint32_t metric_id, + ngx_uint_t *out); +ngx_int_t ngx_wa_metrics_increment(ngx_wa_metrics_t *metrics, + uint32_t metric_id, ngx_int_t val); +ngx_int_t ngx_wa_metrics_record(ngx_wa_metrics_t *metrics, uint32_t metric_id, + ngx_int_t val); + + +#endif /* _NGX_WA_METRICS_H_INCLUDED_ */ diff --git a/src/common/proxy_wasm/ngx_proxy_wasm.h b/src/common/proxy_wasm/ngx_proxy_wasm.h index 2daf69092..408054d62 100644 --- a/src/common/proxy_wasm/ngx_proxy_wasm.h +++ b/src/common/proxy_wasm/ngx_proxy_wasm.h @@ -141,13 +141,11 @@ typedef enum { } ngx_proxy_wasm_map_type_e; -#if 0 typedef enum { NGX_PROXY_WASM_METRIC_COUNTER = 0, NGX_PROXY_WASM_METRIC_GAUGE = 1, NGX_PROXY_WASM_METRIC_HISTOGRAM = 2, } 
ngx_proxy_wasm_metric_type_e; -#endif typedef struct ngx_proxy_wasm_ctx_s ngx_proxy_wasm_ctx_t; diff --git a/src/common/proxy_wasm/ngx_proxy_wasm_host.c b/src/common/proxy_wasm/ngx_proxy_wasm_host.c index b933fae29..2c073e24d 100644 --- a/src/common/proxy_wasm/ngx_proxy_wasm_host.c +++ b/src/common/proxy_wasm/ngx_proxy_wasm_host.c @@ -10,6 +10,7 @@ #include #include #include +#include #ifdef NGX_WASM_HTTP #include #endif @@ -1575,7 +1576,142 @@ ngx_proxy_wasm_hfuncs_dequeue_shared_queue(ngx_wavm_instance_t *instance, /* stats/metrics */ -/* NYI */ + + +static ngx_int_t +ngx_proxy_wasm_hfuncs_define_metric(ngx_wavm_instance_t *instance, + wasm_val_t args[], wasm_val_t rets[]) +{ + size_t max_len; + uint32_t *id; + ngx_str_t name, prefixed_name, *filter_name; + ngx_cycle_t *cycle = (ngx_cycle_t *) ngx_cycle; + ngx_wa_metrics_t *metrics = ngx_wasmx_metrics(cycle); + ngx_wa_metric_type_e type; + ngx_proxy_wasm_exec_t *pwexec; + ngx_proxy_wasm_metric_type_e pw_type; + u_char buf[metrics->config.max_metric_name_length]; + + pwexec = ngx_proxy_wasm_instance2pwexec(instance); + + pw_type = args[0].of.i32; + name.len = args[2].of.i32; + name.data = NGX_WAVM_HOST_LIFT_SLICE(instance, args[1].of.i32, name.len); + id = NGX_WAVM_HOST_LIFT_SLICE(instance, args[3].of.i32, sizeof(uint32_t)); + max_len = metrics->config.max_metric_name_length; + + switch(pw_type) { + case NGX_PROXY_WASM_METRIC_COUNTER: + type = NGX_WA_METRIC_COUNTER; + break; + + case NGX_PROXY_WASM_METRIC_GAUGE: + type = NGX_WA_METRIC_GAUGE; + break; + + case NGX_PROXY_WASM_METRIC_HISTOGRAM: + type = NGX_WA_METRIC_HISTOGRAM; + break; + + default: + return ngx_proxy_wasm_result_trap(pwexec, "unknown metric type", + rets, + NGX_WAVM_ERROR); + } + + filter_name = pwexec->filter->name; + + if (filter_name->len + 4 + name.len > max_len) { + return ngx_proxy_wasm_result_trap(pwexec, "metric name too long", rets, + NGX_WAVM_ERROR); + } + + prefixed_name.data = buf; + prefixed_name.len = ngx_sprintf(buf, "pw.%V.%V", filter_name, &name) - buf; + + if (ngx_wa_metrics_add(metrics, &prefixed_name, type, id) != NGX_OK) { + return ngx_proxy_wasm_result_trap(pwexec, "could not define metric", + rets, + NGX_WAVM_ERROR); + } + + return ngx_proxy_wasm_result_ok(rets); +} + + +static ngx_int_t +ngx_proxy_wasm_hfuncs_increment_metric(ngx_wavm_instance_t *instance, + wasm_val_t args[], wasm_val_t rets[]) +{ + uint32_t metric_id; + ngx_int_t rc, offset; + ngx_cycle_t *cycle = (ngx_cycle_t *) ngx_cycle; + ngx_wa_metrics_t *metrics = ngx_wasmx_metrics(cycle); + ngx_proxy_wasm_exec_t *pwexec = ngx_proxy_wasm_instance2pwexec(instance); + + metric_id = args[0].of.i32; + offset = args[1].of.i64; + + rc = ngx_wa_metrics_increment(metrics, metric_id, offset); + + if (rc != NGX_OK) { + return ngx_proxy_wasm_result_trap(pwexec, "could not increment metric", + rets, NGX_WAVM_ERROR); + } + + return ngx_proxy_wasm_result_ok(rets); +} + + +static ngx_int_t +ngx_proxy_wasm_hfuncs_record_metric(ngx_wavm_instance_t *instance, + wasm_val_t args[], wasm_val_t rets[]) +{ + uint32_t metric_id; + ngx_int_t rc, offset; + ngx_cycle_t *cycle = (ngx_cycle_t *) ngx_cycle; + ngx_wa_metrics_t *metrics = ngx_wasmx_metrics(cycle); + ngx_proxy_wasm_exec_t *pwexec = ngx_proxy_wasm_instance2pwexec(instance); + + metric_id = args[0].of.i32; + offset = args[1].of.i64; + + rc = ngx_wa_metrics_record(metrics, metric_id, offset); + + if (rc != NGX_OK) { + return ngx_proxy_wasm_result_trap(pwexec, + "could not record metric value", rets, + NGX_WAVM_ERROR); + } + + return 
ngx_proxy_wasm_result_ok(rets); +} + + +static ngx_int_t +ngx_proxy_wasm_hfuncs_get_metric(ngx_wavm_instance_t *instance, + wasm_val_t args[], wasm_val_t rets[]) +{ + uint32_t metric_id; + ngx_int_t rc; + ngx_uint_t *ret_value; + ngx_cycle_t *cycle = (ngx_cycle_t *) ngx_cycle; + ngx_wa_metrics_t *metrics = ngx_wasmx_metrics(cycle); + ngx_proxy_wasm_exec_t *pwexec = ngx_proxy_wasm_instance2pwexec(instance); + + metric_id = args[0].of.i32; + ret_value = NGX_WAVM_HOST_LIFT(instance, args[1].of.i32, ngx_uint_t); + + rc = ngx_wa_metrics_get(metrics, metric_id, ret_value); + + if (rc != NGX_OK) { + /* TODO: format with key */ + return ngx_proxy_wasm_result_trap(pwexec, "could not retrieve metric", + rets, NGX_WAVM_ERROR); + } + + return ngx_proxy_wasm_result_ok(rets); +} /* custom extension points */ @@ -1957,7 +2093,7 @@ static ngx_wavm_host_func_def_t ngx_proxy_wasm_hfuncs[] = { ngx_wavm_arity_i32x4, ngx_wavm_arity_i32 }, { ngx_string("proxy_define_metric"), /* 0.2.0 && 0.2.1 */ - &ngx_proxy_wasm_hfuncs_nop, /* NYI */ + &ngx_proxy_wasm_hfuncs_define_metric, ngx_wavm_arity_i32x4, ngx_wavm_arity_i32 }, { ngx_string("proxy_get_metric_value"), /* vNEXT */ @@ -1965,7 +2101,7 @@ static ngx_wavm_host_func_def_t ngx_proxy_wasm_hfuncs[] = { ngx_wavm_arity_i32x2, ngx_wavm_arity_i32 }, { ngx_string("proxy_get_metric"), /* 0.2.0 && 0.2.1 */ - &ngx_proxy_wasm_hfuncs_nop, /* NYI */ + &ngx_proxy_wasm_hfuncs_get_metric, ngx_wavm_arity_i32x2, ngx_wavm_arity_i32 }, { ngx_string("proxy_set_metric_value"), /* vNEXT */ @@ -1973,7 +2109,7 @@ static ngx_wavm_host_func_def_t ngx_proxy_wasm_hfuncs[] = { ngx_wavm_arity_i32_i64, ngx_wavm_arity_i32 }, { ngx_string("proxy_record_metric"), /* 0.2.0 && 0.2.1 */ - &ngx_proxy_wasm_hfuncs_nop, /* NYI */ + &ngx_proxy_wasm_hfuncs_record_metric, ngx_wavm_arity_i32_i64, ngx_wavm_arity_i32 }, { ngx_string("proxy_increment_metric_value"), /* vNEXT */ @@ -1981,7 +2117,7 @@ static ngx_wavm_host_func_def_t ngx_proxy_wasm_hfuncs[] = { ngx_wavm_arity_i32_i64, ngx_wavm_arity_i32 }, { ngx_string("proxy_increment_metric"), /* 0.2.0 && 0.2.1 */ - &ngx_proxy_wasm_hfuncs_nop, /* NYI */ + &ngx_proxy_wasm_hfuncs_increment_metric, ngx_wavm_arity_i32_i64, ngx_wavm_arity_i32 }, { ngx_string("proxy_delete_metric"), /* vNEXT */ diff --git a/src/common/shm/ngx_wasm_shm.h b/src/common/shm/ngx_wasm_shm.h index 1ae08b43e..f3a885138 100644 --- a/src/common/shm/ngx_wasm_shm.h +++ b/src/common/shm/ngx_wasm_shm.h @@ -11,6 +11,7 @@ typedef enum { NGX_WASM_SHM_TYPE_KV, NGX_WASM_SHM_TYPE_QUEUE, + NGX_WASM_SHM_TYPE_METRICS, } ngx_wasm_shm_type_e; diff --git a/src/common/shm/ngx_wasm_shm_kv.c b/src/common/shm/ngx_wasm_shm_kv.c index 265023d83..834b46212 100644 --- a/src/common/shm/ngx_wasm_shm_kv.c +++ b/src/common/shm/ngx_wasm_shm_kv.c @@ -15,28 +15,11 @@ #define NGX_WASM_SLRU_NQUEUES(pool) (NGX_WASM_SLAB_SLOTS(pool) + 1) -typedef struct { - ngx_rbtree_t rbtree; - ngx_rbtree_node_t sentinel; - union { - ngx_queue_t lru_queue; - ngx_queue_t slru_queues[0]; - } eviction; -} ngx_wasm_shm_kv_t; - - -typedef struct { - ngx_str_node_t key; - ngx_str_t value; - uint32_t cas; - ngx_queue_t queue; -} ngx_wasm_shm_kv_node_t; - - -static ngx_inline ngx_wasm_shm_kv_t * +ngx_wasm_shm_kv_t * ngx_wasm_shm_get_kv(ngx_wasm_shm_t *shm) { - ngx_wa_assert(shm->type == NGX_WASM_SHM_TYPE_KV); + ngx_wa_assert(shm->type == NGX_WASM_SHM_TYPE_KV || \ + shm->type == NGX_WASM_SHM_TYPE_METRICS); return shm->data; } @@ -287,8 +270,7 @@ ngx_wasm_shm_kv_set_locked(ngx_wasm_shm_t *shm, ngx_str_t *key, ngx_wasm_shm_kv_node_t *n, *old; old = 
NULL; - n = (ngx_wasm_shm_kv_node_t *) ngx_wasm_shm_rbtree_lookup(&kv->rbtree, - key_hash); + n = ngx_wasm_shm_rbtree_lookup(&kv->rbtree, key_hash); if (cas != (n == NULL ? 0 : n->cas)) { *written = 0; diff --git a/src/common/shm/ngx_wasm_shm_kv.h b/src/common/shm/ngx_wasm_shm_kv.h index fe280034f..d711b6b57 100644 --- a/src/common/shm/ngx_wasm_shm_kv.h +++ b/src/common/shm/ngx_wasm_shm_kv.h @@ -5,6 +5,16 @@ #include +typedef struct { + ngx_rbtree_t rbtree; + ngx_rbtree_node_t sentinel; + union { + ngx_queue_t lru_queue; + ngx_queue_t slru_queues[0]; + } eviction; +} ngx_wasm_shm_kv_t; + + typedef struct { ngx_str_t namespace; ngx_str_t key; @@ -13,6 +23,16 @@ typedef struct { } ngx_wasm_shm_kv_key_t; +typedef struct { + ngx_str_node_t key; + ngx_str_t value; + uint32_t cas; + ngx_queue_t queue; +} ngx_wasm_shm_kv_node_t; + + +ngx_wasm_shm_kv_t * ngx_wasm_shm_get_kv(ngx_wasm_shm_t *shm); + ngx_int_t ngx_wasm_shm_kv_init(ngx_wasm_shm_t *shm); ngx_int_t ngx_wasm_shm_kv_get_locked(ngx_wasm_shm_t *shm, ngx_str_t *key, uint32_t *key_hash, ngx_str_t **value_out, uint32_t *cas); diff --git a/src/ngx_wasmx.c b/src/ngx_wasmx.c index 40787eabc..e7f24a013 100644 --- a/src/ngx_wasmx.c +++ b/src/ngx_wasmx.c @@ -68,6 +68,24 @@ ngx_module_t ngx_wasmx_module = { }; +ngx_inline ngx_wa_metrics_t * +ngx_wasmx_metrics(ngx_cycle_t *cycle) +{ + ngx_wa_conf_t *wacf; + + if (!cycle->conf_ctx) { + return NULL; + } + + wacf = ngx_wa_cycle_get_conf(cycle); + if (wacf == NULL) { + return NULL; + } + + return wacf->metrics; +} + + static char * ngx_wasmx_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf, ngx_uint_t type, ngx_uint_t conf_type) @@ -116,6 +134,11 @@ ngx_wasmx_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf, } #endif + wacf->metrics = ngx_wa_metrics_alloc(cf->cycle); + if (wacf->metrics == NULL) { + return NGX_CONF_ERROR; + } + *(ngx_wa_conf_t **) conf = wacf; } @@ -217,6 +240,10 @@ ngx_wasmx_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf, } } + if (ngx_wa_metrics_init_conf(wacf->metrics, cf) != NGX_OK) { + return NGX_CONF_ERROR; + } + return NGX_CONF_OK; } @@ -249,6 +276,11 @@ ngx_wasmx_init(ngx_cycle_t *cycle) return NGX_OK; } + rc = ngx_wa_metrics_init(wacf->metrics, cycle); + if (rc != NGX_OK) { + return rc; + } + /* NGX_WASM_MODULES + NGX_IPC_MODULES init */ for (i = 0; cycle->modules[i]; i++) { diff --git a/src/ngx_wasmx.h b/src/ngx_wasmx.h index e61001fd2..9f177962e 100644 --- a/src/ngx_wasmx.h +++ b/src/ngx_wasmx.h @@ -3,6 +3,7 @@ #include +#include #if (NGX_DEBUG) @@ -27,11 +28,12 @@ typedef ngx_int_t (*ngx_wa_init_pt)(ngx_cycle_t *cycle); typedef struct { - ngx_uint_t initialized_types; - void **wasm_confs; + ngx_uint_t initialized_types; + void **wasm_confs; #ifdef NGX_WA_IPC - void **ipc_confs; + void **ipc_confs; #endif + ngx_wa_metrics_t *metrics; } ngx_wa_conf_t; diff --git a/src/wasm/ngx_wasm.h b/src/wasm/ngx_wasm.h index c570d10b0..151b0a304 100644 --- a/src/wasm/ngx_wasm.h +++ b/src/wasm/ngx_wasm.h @@ -18,6 +18,7 @@ #define NGX_WASMTIME_CONF 0x20000000 #define NGX_WASMER_CONF 0x40000000 #define NGX_V8_CONF 0x80000000 +#define NGX_METRICS_CONF 0x16000000 #define NGX_WASM_DONE_PHASE 15 #define NGX_WASM_BACKGROUND_PHASE 16 @@ -125,12 +126,16 @@ void ngx_wasm_log_error(ngx_uint_t level, ngx_log_t *log, ngx_err_t err, void swap_modules_if_needed(ngx_conf_t *cf, const char *m1, const char *m2); #endif -/* directives */ +/* blocks */ char *ngx_wasm_core_wasmtime_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); char *ngx_wasm_core_wasmer_block(ngx_conf_t *cf, ngx_command_t 
*cmd, void *conf); char *ngx_wasm_core_v8_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +char *ngx_wasm_core_metrics_block(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); + +/* directives */ char *ngx_wasm_core_flag_directive(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); char *ngx_wasm_core_module_directive(ngx_conf_t *cf, ngx_command_t *cmd, @@ -139,6 +144,10 @@ char *ngx_wasm_core_shm_kv_directive(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); char *ngx_wasm_core_shm_queue_directive(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +char *ngx_wasm_core_metrics_slab_size_directive(ngx_conf_t *cf, + ngx_command_t *cmd, void *conf); +char *ngx_wasm_core_metrics_max_metric_name_length_directive(ngx_conf_t *cf, + ngx_command_t *cmd, void *conf); char *ngx_wasm_core_resolver_directive(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); char *ngx_wasm_core_pwm_lua_resolver_directive(ngx_conf_t *cf, diff --git a/src/wasm/ngx_wasm_core_module.c b/src/wasm/ngx_wasm_core_module.c index cd7f8d80b..f367a44ad 100644 --- a/src/wasm/ngx_wasm_core_module.c +++ b/src/wasm/ngx_wasm_core_module.c @@ -5,6 +5,7 @@ #include #include +#include static void *ngx_wasm_core_create_conf(ngx_conf_t *cf); @@ -43,6 +44,13 @@ static ngx_command_t ngx_wasm_core_commands[] = { 0, NULL }, + { ngx_string("metrics"), + NGX_WASM_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS, + ngx_wasm_core_metrics_block, + NGX_WA_WASM_CONF_OFFSET, + 0, + NULL }, + /* directives */ { ngx_string("flag"), @@ -90,6 +98,20 @@ static ngx_command_t ngx_wasm_core_commands[] = { 0, NULL }, + { ngx_string("slab_size"), + NGX_METRICS_CONF|NGX_CONF_TAKE1, + ngx_wasm_core_metrics_slab_size_directive, + NGX_WA_WASM_CONF_OFFSET, + 0, + NULL }, + + { ngx_string("max_metric_name_length"), + NGX_METRICS_CONF|NGX_CONF_TAKE1, + ngx_wasm_core_metrics_max_metric_name_length_directive, + NGX_WA_WASM_CONF_OFFSET, + 0, + NULL }, + { ngx_string("shm_queue"), NGX_WASM_CONF|NGX_CONF_TAKE23|NGX_CONF_TAKE4, ngx_wasm_core_shm_queue_directive, diff --git a/src/wasm/ngx_wasm_directives.c b/src/wasm/ngx_wasm_directives.c index 5437eae08..0ff42751f 100644 --- a/src/wasm/ngx_wasm_directives.c +++ b/src/wasm/ngx_wasm_directives.c @@ -7,6 +7,10 @@ #include +static ngx_int_t ngx_wasm_core_shm_validate_size(ngx_conf_t *cf, ssize_t size, + ngx_str_t *value); + + static char * ngx_wasm_core_runtime_block(ngx_conf_t *cf, ngx_uint_t cmd_type) { @@ -45,6 +49,23 @@ ngx_wasm_core_v8_block(ngx_conf_t *cf, ngx_command_t *cmd, void *dummy) } +char * +ngx_wasm_core_metrics_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + char *rv; + ngx_conf_t save = *cf; + + cf->cmd_type = NGX_METRICS_CONF; + cf->module_type = NGX_WASM_MODULE; + + rv = ngx_conf_parse(cf, NULL); + + *cf = save; + + return rv; +} + + static ngx_int_t ngx_wasm_core_current_runtime_flag(ngx_conf_t *cf) { @@ -109,6 +130,35 @@ ngx_wasm_core_flag_directive(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) } +static ngx_int_t +ngx_wasm_core_shm_validate_size(ngx_conf_t *cf, ssize_t size, ngx_str_t *value) +{ + const ssize_t min_size = 3 * ngx_pagesize; + + if (size == NGX_ERROR) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "[wasm] invalid shm size \"%V\"", value); + return NGX_ERROR; + } + + if (size < min_size) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "[wasm] shm size of %d bytes is too small, " + "minimum required is %d bytes", size, min_size); + return NGX_ERROR; + } + + if ((size & (ngx_pagesize - 1)) != 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "[wasm] shm size of %d bytes is not page-aligned, " + 
"must be a multiple of %d", size, ngx_pagesize); + return NGX_ERROR; + } + + return NGX_OK; +} + + static char * ngx_wasm_core_shm_generic_directive(ngx_conf_t *cf, ngx_command_t *cmd, void *conf, ngx_wasm_shm_type_e type) @@ -120,7 +170,6 @@ ngx_wasm_core_shm_generic_directive(ngx_conf_t *cf, ngx_command_t *cmd, ngx_wasm_shm_mapping_t *mapping; ngx_wasm_shm_t *shm; ngx_wasm_shm_eviction_e eviction; - const ssize_t min_size = 3 * ngx_pagesize; value = cf->args->elts; name = &value[1]; @@ -134,23 +183,7 @@ ngx_wasm_core_shm_generic_directive(ngx_conf_t *cf, ngx_command_t *cmd, return NGX_CONF_ERROR; } - if (size == NGX_ERROR) { - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "[wasm] invalid shm size \"%V\"", &value[2]); - return NGX_CONF_ERROR; - } - - if (size < min_size) { - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "[wasm] shm size of %d bytes is too small, " - "minimum required is %d bytes", size, min_size); - return NGX_CONF_ERROR; - } - - if ((size & (ngx_pagesize - 1)) != 0) { - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "[wasm] shm size of %d bytes is not page-aligned, " - "must be a multiple of %d", size, ngx_pagesize); + if (ngx_wasm_core_shm_validate_size(cf, size, &value[2]) != NGX_OK) { return NGX_CONF_ERROR; } @@ -295,6 +328,57 @@ ngx_wasm_core_shm_queue_directive(ngx_conf_t *cf, ngx_command_t *cmd, } +char * +ngx_wasm_core_metrics_slab_size_directive(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf) +{ + ssize_t size; + ngx_str_t *value; + ngx_wa_metrics_t *metrics = ngx_wasmx_metrics(cf->cycle); + + if (cf->cmd_type != NGX_METRICS_CONF) { + return NGX_CONF_ERROR; + } + + if (metrics->config.slab_size != NGX_CONF_UNSET_SIZE) { + return "is duplicate"; + } + + value = cf->args->elts; + size = ngx_parse_size(&value[1]); + + if (ngx_wasm_core_shm_validate_size(cf, size, &value[1]) != NGX_OK) { + return NGX_CONF_ERROR; + } + + metrics->config.slab_size = size; + + return NGX_CONF_OK; +} + + +char * +ngx_wasm_core_metrics_max_metric_name_length_directive(ngx_conf_t *cf, + ngx_command_t *cmd, void *conf) +{ + ngx_str_t *value; + ngx_wa_metrics_t *metrics = ngx_wasmx_metrics(cf->cycle); + + if (cf->cmd_type != NGX_METRICS_CONF) { + return NGX_CONF_ERROR; + } + + if (metrics->config.max_metric_name_length != NGX_CONF_UNSET_SIZE) { + return "is duplicate"; + } + + value = cf->args->elts; + metrics->config.max_metric_name_length = ngx_parse_size(&value[1]); + + return NGX_CONF_OK; +} + + char * ngx_wasm_core_resolver_directive(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { diff --git a/t/01-wasm/directives/011-metrics_directives.t b/t/01-wasm/directives/011-metrics_directives.t new file mode 100644 index 000000000..466fe0ef2 --- /dev/null +++ b/t/01-wasm/directives/011-metrics_directives.t @@ -0,0 +1,120 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +plan_tests(5); +run_tests(); + +__DATA__ + +=== TEST 1: metrics{} - empty block +--- valgrind +--- main_config + wasm { + metrics {} + } +--- no_error_log +[error] +[crit] +[emerg] +[stub] + + + +=== TEST 2: slab_size directive - sanity +--- main_config + wasm { + metrics { + slab_size 12k; + } + } +--- no_error_log +[error] +[crit] +[emerg] +[stub] + + + +=== TEST 3: slab_size directive - too small +--- main_config + wasm { + metrics { + slab_size 1k; + } + } +--- error_log eval +qr/\[emerg\] .*? 
\[wasm\] shm size of \d+ bytes is too small, minimum required is 12288 bytes/ +--- no_error_log +[error] +[crit] +[stub] +--- must_die + + + +=== TEST 4: slab_size directive - invalid size +--- main_config + wasm { + metrics { + slab_size 1x; + } + } +--- error_log eval +qr/\[emerg\] .*? \[wasm\] invalid shm size "1x"/ +--- no_error_log +[error] +[crit] +[stub] +--- must_die + + + +=== TEST 5: slab_size directive - duplicate +--- main_config + wasm { + metrics { + slab_size 12k; + slab_size 12k; + } + } +--- error_log: is duplicate +--- no_error_log +[error] +[crit] +[stub] +--- must_die + + + +=== TEST 6: max_metric_name_length directive - sanity +--- main_config + wasm { + metrics { + max_metric_name_length 64; + } + } +--- no_error_log +[error] +[crit] +[emerg] +[stub] + + + +=== TEST 7: max_metric_name_length directive - duplicate +--- main_config + wasm { + metrics { + max_metric_name_length 64; + max_metric_name_length 64; + } + } +--- error_log: is duplicate +--- no_error_log +[error] +[crit] +[stub] +--- must_die diff --git a/t/03-proxy_wasm/hfuncs/contexts/150-proxy_define_metric.t b/t/03-proxy_wasm/hfuncs/contexts/150-proxy_define_metric.t new file mode 100644 index 000000000..01d5424a2 --- /dev/null +++ b/t/03-proxy_wasm/hfuncs/contexts/150-proxy_define_metric.t @@ -0,0 +1,170 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +our $workers = 2; + +workers($workers); +if ($workers > 1) { + master_on(); +} + +plan_tests(6); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm contexts - proxy_define_metric - on_vm_start +Hostcalls filter prefixes the name of a metric with the phase in which it's +defined. A metric c1 defined within on_configure ends up named c1_Configure. + +--- skip_no_debug +--- valgrind +--- main_config + wasm { + module hostcalls $TEST_NGINX_CRATES_DIR/hostcalls.wasm 'define_metric'; + } +--- config + location /t { + proxy_wasm hostcalls; + return 200; + } +--- error_log eval +qr/defined counter ".+c1_OnVMStart" with id \d+/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 2: proxy_wasm contexts - proxy_define_metric - on_configure +--- valgrind +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- config + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + metrics=c1,g1'; + echo ok; + } +--- grep_error_log eval: qr/defined metric \w+ as \d+ at \w+/ +--- grep_error_log_out eval +qr/defined metric c1_Configure as \d+ at Configure +defined metric g1_Configure as \d+ at Configure\n/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 3: proxy_wasm contexts - proxy_define_metric - on_tick +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- config + location /t { + proxy_wasm hostcalls 'on_tick=define_metrics \ + tick_period=500 \ + n_sync_calls=1 \ + metrics=c1,g1'; + echo ok; + } +--- grep_error_log eval: qr/defined metric \w+ as \d+ at \w+/ +--- grep_error_log_out eval +qr/defined metric c1_Tick as \d+ at Tick +defined metric g1_Tick as \d+ at Tick\n/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 4: proxy_wasm contexts - proxy_define_metric - on: request_headers, request_body, response_headers, response_body, log +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +my $phases = CORE::join(',', qw( + request_headers + request_body + response_headers + response_body + log +)); + +qq{ + location /t { + proxy_wasm hostcalls 'on=$phases \ + 
test=/t/metrics/define \ + metrics=c1,g1'; + echo ok; + } +} +--- request +POST /t +hello +--- grep_error_log eval: qr/defined metric \w+ as \d+ at \w+/ +--- grep_error_log_out eval +my $checks; +my @phases = qw( + RequestHeaders + RequestBody + ResponseHeaders + ResponseBody + ResponseBody + Log +); + +foreach my $p (@phases) { + my $suffixed_c1 = "c1_" . $p; + my $suffixed_g1 = "g1_" . $p; + $checks .= " +?defined metric $suffixed_c1 as [0-9]+ at $p +defined metric $suffixed_g1 as [0-9]+ at $p\n"; +} + +qr/$checks/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 5: proxy_wasm contexts - proxy_define_metric - on_http_call_response +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- http_config eval +--- config eval +qq{ + listen unix:$ENV{TEST_NGINX_UNIX_SOCKET}; + + location /dispatched { + return 200 "Hello back"; + } + + location /t { + proxy_wasm hostcalls 'test=/t/dispatch_http_call \ + host=unix:$ENV{TEST_NGINX_UNIX_SOCKET} \ + path=/dispatched \ + on_http_call_response=define_metrics \ + metrics=c1,g1'; + echo ok; + } +} +--- grep_error_log eval: qr/defined metric \w+ as \d+ at \w+/ +--- grep_error_log_out eval +qr/defined metric c1_HTTPCallResponse as \d+ at HTTPCallResponse +defined metric g1_HTTPCallResponse as \d+ at HTTPCallResponse\n/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] diff --git a/t/03-proxy_wasm/hfuncs/contexts/151-proxy_increment_metric.t b/t/03-proxy_wasm/hfuncs/contexts/151-proxy_increment_metric.t new file mode 100644 index 000000000..199079de3 --- /dev/null +++ b/t/03-proxy_wasm/hfuncs/contexts/151-proxy_increment_metric.t @@ -0,0 +1,172 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +skip_hup(); + +our $workers = 2; + +workers($workers); +if ($workers > 1) { + master_on(); +} + +plan_tests(6); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm contexts - proxy_increment_metric - on_vm_start +--- skip_no_debug +--- valgrind +--- main_config + wasm { + module hostcalls $TEST_NGINX_CRATES_DIR/hostcalls.wasm 'increment_metric'; + } +--- config + location /t { + proxy_wasm hostcalls; + return 200; + } +--- error_log eval +qr/updating metric "\d+" with 1/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 2: proxy_wasm metrics shm - proxy_increment_metric - on_configure +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config + location /t { + proxy_wasm hostcalls 'on_configure=define_and_increment_counters \ + metrics=c2'; + echo ok; + } +--- error_log eval +qr/c1_Configure: $::workers at Configure/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 3: proxy_wasm metrics - proxy_increment_metric() - on_tick +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- config + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + on_tick=increment_counters \ + tick_period=500 \ + n_sync_calls=1 \ + metrics=c2'; + echo ok; + } +--- error_log eval +qr/c1_Configure: $::workers at Tick/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 4: proxy_wasm metrics - proxy_increment_metric() - on: request_headers, request_body, response_headers, response_body, log +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +my $phases = CORE::join(',', qw( + request_headers + request_body + response_headers + response_body + log +)); + +qq{ + location /t { + proxy_wasm hostcalls 
'on_configure=define_metrics \ + on=$phases \ + test=/t/metrics/increment_counters \ + metrics=c2'; + echo ok; + } +} +--- request +POST /t +hello +--- grep_error_log eval: qr/(incremented \w+|\w+: \d+) at \w+/ +--- grep_error_log_out eval +my $checks; +my $i = 0; +my @phases = qw( + RequestHeaders + RequestBody + ResponseHeaders + ResponseBody + ResponseBody + Log +); + +foreach my $p (@phases) { + $i++; + $checks .= " +?incremented c1_Configure at $p +incremented c2_Configure at $p +c1_Configure: $i at $p +c2_Configure: $i at $p\n"; +} + +qr/$checks/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 5: proxy_wasm metrics - proxy_increment_metric() - on_http_call_response +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- http_config eval +--- config eval +qq{ + listen unix:$ENV{TEST_NGINX_UNIX_SOCKET}; + + location /dispatched { + return 200 "Hello back"; + } + + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + test=/t/dispatch_http_call \ + host=unix:$ENV{TEST_NGINX_UNIX_SOCKET} \ + path=/dispatched \ + on_http_call_response=increment_counters \ + metrics=c2'; + echo ok; + } +} +--- grep_error_log eval: qr/(incremented \w+|\w+: \d+) at \w+/ +--- grep_error_log_out eval +qr/incremented c1_Configure at HTTPCallResponse +incremented c2_Configure at HTTPCallResponse +c1_Configure: 1 at HTTPCallResponse +c2_Configure: 1 at HTTPCallResponse\n/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] diff --git a/t/03-proxy_wasm/hfuncs/contexts/152-proxy_record_metric.t b/t/03-proxy_wasm/hfuncs/contexts/152-proxy_record_metric.t new file mode 100644 index 000000000..0f35d8244 --- /dev/null +++ b/t/03-proxy_wasm/hfuncs/contexts/152-proxy_record_metric.t @@ -0,0 +1,203 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +skip_hup(); + +our $workers = 2; + +workers($workers); +if ($workers > 1) { + master_on(); +} + +plan_tests(6); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm contexts - proxy_record_metric - on_vm_start +--- skip_no_debug +--- valgrind +--- main_config + wasm { + module hostcalls $TEST_NGINX_CRATES_DIR/hostcalls.wasm 'record_metric'; + } +--- config + location /t { + proxy_wasm hostcalls; + return 200; + } +--- error_log eval +qr/updating metric "\d+" with 1/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 2: proxy_wasm metrics shm - proxy_record_metric - on_configure +--- workers: 1 +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config + location /t { + proxy_wasm hostcalls 'metrics=g2 \ + on_configure=define_and_toggle_gauges'; + echo ok; + } +--- grep_error_log eval: qr/(toggled \w+|\w+: \d+) at \w+/ +--- grep_error_log_out eval +my $check; +$check .= "toggled g1_Configure at Configure(\n|\n.+\n)"; +$check .= "toggled g2_Configure at Configure(\n|\n.+\n)"; +$check .= "g1_Configure: 1 at Configure(\n|\n.+\n)"; +$check .= "g2_Configure: 1 at Configure(\n|\n.+\n)"; +qr/$check/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 3: proxy_wasm metrics - proxy_record_metric - on_tick +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- config eval +my $filters; + +foreach my $wid (0 .. 
$::workers - 1) { + my $wait = 100 + ($wid * 500); + $filters .= " + proxy_wasm hostcalls 'on_configure=define_metrics \ + on_tick=set_gauges \ + tick_period=$wait \ + n_sync_calls=1 \ + on_worker=$wid \ + value=$wid \ + metrics=g2';"; +} +qq{ + location /t { + $filters + + echo ok; + } +} +--- wait: 1 +--- grep_error_log eval: qr/(record \d+ on \w+|\w+: \d+) at \w+/ +--- grep_error_log_out eval +my $checks; + +foreach my $worker_id (0 .. $::workers - 1) { + $checks .= "record $worker_id on g1_Configure at Tick +record $worker_id on g2_Configure at Tick +g1_Configure: $worker_id at Tick +g2_Configure: $worker_id at Tick +"; +} + +qr/$checks/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 4: proxy_wasm metrics - proxy_record_metric - on: request_headers, request_body, response_headers, response_body, log +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +my $phases = CORE::join(',', qw( + request_headers + request_body + response_headers + response_body + log +)); + +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=$phases \ + test=/t/metrics/toggle_gauges \ + metrics=g2'; + echo ok; + } +} +--- request +POST /t +hello +--- grep_error_log eval: qr/(toggled \w+|\w+: \d+) at \w+/ +--- grep_error_log_out eval +my $i = 0; +my $checks; +my @phases = qw( + RequestHeaders + RequestBody + ResponseHeaders + ResponseBody + ResponseBody + Log +); + +foreach my $phase (@phases) { + $i = $i ? 0 : 1; + $checks .= " +?toggled g1_Configure at $phase +toggled g2_Configure at $phase +g1_Configure: $i at $phase +g2_Configure: $i at $phase\n"; +} + +qr/$checks/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 5: proxy_wasm metrics - proxy_record_metric - on_http_call_response +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- http_config eval +--- config eval +qq{ + listen unix:$ENV{TEST_NGINX_UNIX_SOCKET}; + + location /dispatched { + return 200 "Hello back"; + } + + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + test=/t/dispatch_http_call \ + host=unix:$ENV{TEST_NGINX_UNIX_SOCKET} \ + path=/dispatched \ + on_http_call_response=toggle_gauges \ + metrics=g2'; + echo ok; + } +} +--- grep_error_log eval: qr/(toggled \w+|\w+: \d+) at \w+/ +--- grep_error_log_out eval +qr/toggled g1_Configure at HTTPCallResponse +toggled g2_Configure at HTTPCallResponse +g1_Configure: 1 at HTTPCallResponse +g2_Configure: 1 at HTTPCallResponse\n/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] diff --git a/t/03-proxy_wasm/hfuncs/contexts/153-proxy_record_metric_histogram.t b/t/03-proxy_wasm/hfuncs/contexts/153-proxy_record_metric_histogram.t new file mode 100644 index 000000000..3c46fd11f --- /dev/null +++ b/t/03-proxy_wasm/hfuncs/contexts/153-proxy_record_metric_histogram.t @@ -0,0 +1,188 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +skip_hup(); + +our $workers = 2; + +workers($workers); +if ($workers > 1) { + master_on(); +} + +plan_tests(7); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm metrics shm - record_metric, histogram - sanity +--- skip_no_debug +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +my $filters; + +foreach my $exp (0 .. 
17) { + my $v = 2 ** $exp; + $filters .= " + proxy_wasm hostcalls 'on_configure=define_metrics \ + test=/t/metrics/record_histograms \ + metrics=h1 \ + value=$v';"; +} +qq{ + location /t { + $filters + + echo ok; + } +} +--- error_log eval +[ + "growing histogram", + qr/histogram "\d+": 1: 1; 2: 1; 4: 1; 8: 1; 16: 1; 32: 1; 64: 1; 128: 1; 256: 1; 512: 1; 1024: 1; 2048: 1; 4096: 1; 8192: 1; 16384: 1; 32768: 1; 65536: 1; 4294967295: 1;/ +] +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 2: proxy_wasm metrics shm - record_metric, histogram - on_configure +--- skip_no_debug +--- workers: 2 +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config + location /t { + proxy_wasm hostcalls 'on_configure=define_and_record_histograms \ + test=/t/metrics/record_histograms \ + metrics=h1 \ + value=10'; + echo ok; + } +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +qr/histogram "\d+": 16: $::workers; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] +[stub] + + + +=== TEST 3: proxy_wasm metrics - record_metric(), histogram - on_tick +--- skip_no_debug +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- config eval +my $filters; + +foreach my $wid (0 .. $::workers - 1) { + $filters .= " + proxy_wasm hostcalls 'on_configure=define_metrics \ + on_tick=record_histograms \ + tick_period=100 \ + n_sync_calls=1 \ + on_worker=$wid \ + value=1 \ + metrics=h2';"; +} +qq{ + location /t { + $filters + + echo ok; + } +} +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +qr/histogram "\d+": 1: $::workers; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] +[stub] + + + +=== TEST 4: proxy_wasm metrics - record_metric(), histogram - on: request_headers, request_body, response_headers, response_body +--- skip_no_debug +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +my $phases = CORE::join(',', qw( + request_headers + request_body + response_headers + response_body +)); + +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=$phases \ + test=/t/metrics/record_histograms \ + value=100 \ + metrics=h1'; + echo ok; + } +} +--- request +POST /t +hello +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +qr/histogram "\d+": 128: 4; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] +[stub] + + + +=== TEST 5: proxy_wasm metrics - record_metric(), histogram - on_http_call_response +--- skip_no_debug +--- wasm_modules: hostcalls +--- load_nginx_modules: ngx_http_echo_module +--- http_config eval +--- config eval +qq{ + listen unix:$ENV{TEST_NGINX_UNIX_SOCKET}; + + location /dispatched { + return 200 "Hello back"; + } + + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + test=/t/dispatch_http_call \ + host=unix:$ENV{TEST_NGINX_UNIX_SOCKET} \ + path=/dispatched \ + on_http_call_response=record_histograms \ + value=1000 \ + metrics=h2'; + echo ok; + } +} +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +qr/histogram "\d+": 1024: 1; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] +[stub] diff --git a/t/03-proxy_wasm/hfuncs/metrics/001-define_metric_edge_cases.t b/t/03-proxy_wasm/hfuncs/metrics/001-define_metric_edge_cases.t new file mode 100644 index 000000000..442da35cf --- /dev/null +++ 
b/t/03-proxy_wasm/hfuncs/metrics/001-define_metric_edge_cases.t @@ -0,0 +1,123 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +skip_hup(); +no_shuffle(); + +plan_tests(6); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm metrics - define_metric() metric name too long +In SIGHUP mode, this test fails if executed after a test that defined metrics, +as any existing metric whose name exceeds `max_metric_name_length` won't be +successfully reallocated causing the reconfiguration to fail. + +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- main_config eval +qq{ + wasm { + module hostcalls $ENV{TEST_NGINX_CRATES_DIR}/hostcalls.wasm; + + metrics { + max_metric_name_length 4; + } + } +} +--- config + location /t { + proxy_wasm hostcalls 'on=request_headers \ + test=/t/metrics/define \ + metrics=c1'; + echo ok; + } +--- error_code: 500 +--- error_log eval +[ + qr/.+on_request_headers.+/, + qr/host trap \(internal error\): metric name too long.*/, +] +--- no_error_log +[emerg] +[alert] +[stub] + + + +=== TEST 2: proxy_wasm metrics - define_metric() no memory +In SIGHUP mode, this test fails if executed after a test that defined more +metrics than it's possible to fit in `5m`. + +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- main_config eval +qq{ + wasm { + module hostcalls $ENV{TEST_NGINX_CRATES_DIR}/hostcalls.wasm; + + metrics { + slab_size 5m; + max_metric_name_length 128; + } + } +} +--- config + location /t { + proxy_wasm hostcalls 'on=request_headers \ + test=/t/metrics/define \ + metrics=c20337 \ + metrics_name_len=100'; + echo ok; + } +--- error_code: 500 +--- error_log eval +[ + qr/\[crit\] .+ \[wasm\] "metrics" shm store: no memory; cannot allocate pair with key size \d+ and value size \d+/, + qr/.+on_request_headers.+/, + qr/host trap \(internal error\): could not define metric.*/, +] +--- no_error_log +[emerg] +[alert] + + + +=== TEST 3: proxy_wasm metrics - define_metric() no memory, histogram +In SIGHUP mode, this test fails if executed after a test that defined more +metrics than it's possible to fit in `5m`. 
+ +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- main_config eval +qq{ + wasm { + module hostcalls $ENV{TEST_NGINX_CRATES_DIR}/hostcalls.wasm; + + metrics { + slab_size 16k; + max_metric_name_length 128; + } + } +} +--- config + location /t { + proxy_wasm hostcalls 'on=request_headers \ + test=/t/metrics/define \ + metrics=c30,h16'; + echo ok; + } +--- error_code: 500 +--- error_log eval +[ + "cannot allocate histogram", + qr/.+on_request_headers.+/, + qr/host trap \(internal error\): could not define metric.*/, +] +--- no_error_log +[emerg] +[alert] diff --git a/t/03-proxy_wasm/hfuncs/metrics/002-get_metric_misuse.t b/t/03-proxy_wasm/hfuncs/metrics/002-get_metric_misuse.t new file mode 100644 index 000000000..652646e83 --- /dev/null +++ b/t/03-proxy_wasm/hfuncs/metrics/002-get_metric_misuse.t @@ -0,0 +1,32 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +plan_tests(7); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm metrics - get_metric() invalid metric id +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config + location /t { + proxy_wasm hostcalls 'on=request_headers \ + test=/t/metrics/get_invalid_metric'; + echo ok; + } +--- error_code: 500 +--- error_log eval +[ + qr/.+on_request_headers.+/, + qr/metric \"0\" not found.*/, +] +--- no_error_log +[crit] +[emerg] +[alert] +[stub] diff --git a/t/03-proxy_wasm/hfuncs/metrics/003-increment_metric_misuse.t b/t/03-proxy_wasm/hfuncs/metrics/003-increment_metric_misuse.t new file mode 100644 index 000000000..56b641bb7 --- /dev/null +++ b/t/03-proxy_wasm/hfuncs/metrics/003-increment_metric_misuse.t @@ -0,0 +1,60 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +plan_tests(7); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm metrics - increment_metric() gauge +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=request_headers \ + test=/t/metrics/increment_gauges \ + metrics=g1,g2'; + echo ok; + } +} +--- error_code: 500 +--- error_log eval +[ + qr/\[error\] .+ \[wasm\] attempt to call increment_metric on a gauge; operation not supported/, + qr/.+on_request_headers.+/, + qr/host trap \(internal error\): could not increment metric.*/, +] +--- no_error_log +[crit] +[emerg] +[alert] + + + +=== TEST 2: proxy_wasm metrics - increment_metric() invalid metric id +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config + location /t { + proxy_wasm hostcalls 'on=request_headers \ + test=/t/metrics/increment_invalid_counter'; + echo ok; + } +--- error_code: 500 +--- error_log eval +[ + qr/.+on_request_headers.+/, + qr/metric \"\d+\" not found.*/, + qr/host trap \(internal error\): could not increment metric.*/, +] +--- no_error_log +[crit] +[emerg] +[alert] diff --git a/t/03-proxy_wasm/hfuncs/metrics/004-record_metric_misuse.t b/t/03-proxy_wasm/hfuncs/metrics/004-record_metric_misuse.t new file mode 100644 index 000000000..f1e6532d6 --- /dev/null +++ b/t/03-proxy_wasm/hfuncs/metrics/004-record_metric_misuse.t @@ -0,0 +1,58 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +plan_tests(7); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm metrics - record_metric() counter +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- 
config + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=request_headers \ + test=/t/metrics/toggle_counters \ + metrics=c1,c2'; + echo ok; + } +--- error_code: 500 +--- error_log eval +[ + qr/\[error\] .+ \[wasm\] attempt to call record_metric on a counter; operation not supported/, + qr/.+on_request_headers.+/, + qr/host trap \(internal error\): could not record metric.*/, +] +--- no_error_log +[crit] +[emerg] +[alert] + + + +=== TEST 2: proxy_wasm metrics - record_metric() invalid metric id +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config + location /t { + proxy_wasm hostcalls 'on=request_headers \ + test=/t/metrics/set_invalid_gauge'; + echo ok; + } +--- error_code: 500 +--- error_log eval +[ + qr/.+on_request_headers.+/, + qr/metric \"0\" not found.*/, +] +--- no_error_log +[crit] +[emerg] +[alert] +[stub] diff --git a/t/03-proxy_wasm/hfuncs/metrics/005-record_metric_edge_cases.t b/t/03-proxy_wasm/hfuncs/metrics/005-record_metric_edge_cases.t new file mode 100644 index 000000000..fb43a0aea --- /dev/null +++ b/t/03-proxy_wasm/hfuncs/metrics/005-record_metric_edge_cases.t @@ -0,0 +1,74 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: + +use strict; +use lib '.'; +use t::TestWasmX; + +skip_hup(); +no_shuffle(); + +plan_tests(4); +run_tests(); + +__DATA__ + +=== TEST 1: proxy_wasm metrics - record_metric() no memory, can't expand histogram +In SIGHUP mode, this test fails if executed after a test that defined more +metrics than it's possible to fit in `5m`. + +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- main_config eval +qq{ + wasm { + module hostcalls $ENV{TEST_NGINX_CRATES_DIR}/hostcalls.wasm; + + metrics { + slab_size 5m; + max_metric_name_length 128; + } + } +} +--- config + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=request_headers \ + test=/t/metrics/record_histograms \ + metrics=h16256 \ + value=1 \ + metrics_name_len=115'; + + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=request_headers \ + test=/t/metrics/record_histograms \ + metrics=h16256 \ + value=2 \ + metrics_name_len=115'; + + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=request_headers \ + test=/t/metrics/record_histograms \ + metrics=h16256 \ + value=4 \ + metrics_name_len=115'; + + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=request_headers \ + test=/t/metrics/record_histograms \ + metrics=h16256 \ + value=8 \ + metrics_name_len=115'; + + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=request_headers \ + test=/t/metrics/record_histograms \ + metrics=h16256 \ + value=16 \ + metrics_name_len=115'; + echo ok; + } +--- error_log +cannot expand histogram +--- no_error_log +[emerg] +[alert] diff --git a/t/07-metrics/001-metrics_sighup.t b/t/07-metrics/001-metrics_sighup.t new file mode 100644 index 000000000..c406ff5c7 --- /dev/null +++ b/t/07-metrics/001-metrics_sighup.t @@ -0,0 +1,199 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: +use strict; +use lib '.'; +use t::TestWasmX; + +skip_no_hup(); + +our $workers = 2; + +our $metrics = "c2,g2,h2"; + +workers($workers); +if ($workers > 1) { + master_on(); +} + +no_shuffle(); +plan_tests(8); +run_tests(); + +__DATA__ + +=== TEST 1: SIGHUP metrics - define metrics and increment counters +Evaluating counters values in error_log rather than in response headers as some +worker(s) might not have done their increment by the time response_headers phase +is invoked. 
+ +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_increment_counters \ + metrics=$::metrics'; + echo ok; + } +} +--- error_log eval +qr/c2_Configure: $::workers.*/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] +[stub] +[stub] + + + +=== TEST 2: SIGHUP metrics - shm preserved, no realloc +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_increment_counters \ + on=response_headers \ + test=/t/metrics/get \ + metrics=$::metrics'; + echo ok; + } +} +--- response_headers +c1-Configure: 4 +c2-Configure: 4 +--- no_error_log +reallocating metric +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 3: SIGHUP metrics - increased worker_processes - shm preserved, realloc +--- workers: 4 +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_increment_counters \ + on=response_headers \ + test=/t/metrics/get \ + metrics=$::metrics'; + echo ok; + } +} +--- response_headers +c1-Configure: 8 +c2-Configure: 8 +--- error_log: reallocating metric +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 4: SIGHUP metrics - decreased worker_processes - shm preserved, realloc +--- workers: 2 +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_increment_counters \ + on=response_headers \ + test=/t/metrics/get \ + metrics=$::metrics'; + echo ok; + } +} +--- response_headers +c1-Configure: 10 +c2-Configure: 10 +--- error_log: reallocating metric +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 5: SIGHUP metrics - decreased slab_size - shm preserved, realloc +--- workers: 2 +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- main_config eval +qq{ + wasm { + module hostcalls $ENV{TEST_NGINX_CRATES_DIR}/hostcalls.wasm; + + metrics { + slab_size 4m; + } + } +} +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_increment_counters \ + on=response_headers \ + test=/t/metrics/get \ + metrics=$::metrics'; + echo ok; + } +} +--- response_headers +c1-Configure: 12 +c2-Configure: 12 +--- error_log: reallocating metric +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 6: SIGHUP metrics - increased slab_size - shm preserved, realloc +--- workers: 2 +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- main_config eval +qq{ + wasm { + module hostcalls $ENV{TEST_NGINX_CRATES_DIR}/hostcalls.wasm; + + metrics { + slab_size 5m; + } + } +} +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_increment_counters \ + on=response_headers \ + test=/t/metrics/get \ + metrics=$::metrics'; + echo ok; + } +} +--- response_headers +c1-Configure: 14 +c2-Configure: 14 +--- error_log: reallocating metric +--- no_error_log +[error] +[crit] +[emerg] +[alert] diff --git a/t/07-metrics/002-histograms_sighup.t b/t/07-metrics/002-histograms_sighup.t new file mode 100644 index 000000000..5ce503693 --- /dev/null +++ b/t/07-metrics/002-histograms_sighup.t @@ -0,0 +1,186 @@ +# vim:set ft= ts=4 sts=4 sw=4 et fdm=marker: +use strict; +use lib '.'; +use t::TestWasmX; + +skip_no_hup(); + +our $workers = 2; +our $total = 0; + 
+workers($workers); +if ($workers > 1) { + master_on(); +} + +no_shuffle(); +plan_tests(6); +run_tests(); + +__DATA__ + +=== TEST 1: SIGHUP metrics - define metrics and record histograms +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_record_histograms \ + metrics=c2,g2,h2'; + echo ok; + } +} +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +$::total += $::workers; +qr/histogram "\d+": 1: $::total; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 2: SIGHUP metrics - shm preserved, no realloc +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_record_histograms \ + metrics=h2'; + echo ok; + } +} +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +$::total += $::workers; +qr/histogram "\d+": 1: $::total; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 3: SIGHUP metrics - increased worker_processes - shm preserved, realloc +--- workers: 4 +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_record_histograms \ + metrics=h2'; + echo ok; + } +} +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +$::total += 4; +qr/histogram "\d+": 1: $::total; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 4: SIGHUP metrics - decreased worker_processes - shm preserved, realloc +--- workers: 2 +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- wasm_modules: hostcalls +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_and_record_histograms \ + metrics=h2'; + echo ok; + } +} +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +$::total += 2; +qr/histogram "\d+": 1: $::total; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 5: SIGHUP metrics - decreased slab_size - shm preserved, realloc +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- main_config eval +qq{ + wasm { + module hostcalls $ENV{TEST_NGINX_CRATES_DIR}/hostcalls.wasm; + + metrics { + slab_size 120k; + } + } +} +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=request_headers \ + test=/t/metrics/record_histograms \ + metrics=h2'; + echo ok; + } +} +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +$::total += 1; +qr/histogram "\d+": 1: $::total; 4294967295: 0;/ +--- no_error_log +[error] +[crit] +[emerg] +[alert] + + + +=== TEST 6: SIGHUP metrics - increased slab_size - shm preserved, realloc +--- valgrind +--- load_nginx_modules: ngx_http_echo_module +--- main_config eval +qq{ + wasm { + module hostcalls $ENV{TEST_NGINX_CRATES_DIR}/hostcalls.wasm; + + metrics { + slab_size 16k; + } + } +} +--- config eval +qq{ + location /t { + proxy_wasm hostcalls 'on_configure=define_metrics \ + on=response_headers \ + test=/t/metrics/record_histograms \ + metrics=h2'; + echo ok; + } +} +--- grep_error_log eval: qr/histogram "\d+":( \d+: \d+;)+/ +--- grep_error_log_out eval +$::total += 1; +qr/histogram "\d+": 1: $::total; 4294967295: 0;/ +--- no_error_log +[error] 
+[crit] +[emerg] +[alert] diff --git a/t/TestWasmX.pm b/t/TestWasmX.pm index c2c3df560..a8d207146 100644 --- a/t/TestWasmX.pm +++ b/t/TestWasmX.pm @@ -29,6 +29,7 @@ our @EXPORT = qw( load_nginx_modules plan_tests skip_hup + skip_no_hup skip_no_ssl skip_no_ipc skip_no_debug @@ -59,6 +60,12 @@ sub skip_hup { } } +sub skip_no_hup { + if ($ENV{TEST_NGINX_USE_HUP} == 0) { + plan(skip_all => "skip in default mode"); + } +} + sub skip_no_ssl { if ($nginxV !~ m/built with \S+SSL/) { plan(skip_all => "SSL support required (NGX_BUILD_SSL=1)"); @@ -201,6 +208,17 @@ add_block_preprocessor(sub { $wasm_config = $wasm_config . (join "\n", @arr); } + # --- metrics + + my $metrics = $block->metrics; + + if (defined $metrics) { + $wasm_config = $wasm_config . + " metrics {\n" . + " slab_size $metrics" . ";\n" . + " }\n"; + } + # --- shm_queue my $shm_queue = $block->shm_queue; diff --git a/t/lib/proxy-wasm-tests/hostcalls/src/filter.rs b/t/lib/proxy-wasm-tests/hostcalls/src/filter.rs index bea8a9391..0da33c0ba 100644 --- a/t/lib/proxy-wasm-tests/hostcalls/src/filter.rs +++ b/t/lib/proxy-wasm-tests/hostcalls/src/filter.rs @@ -1,4 +1,4 @@ -use crate::{test_http::*, types::*}; +use crate::{test_http::*, tests::*, types::*}; use http::StatusCode; use log::*; use proxy_wasm::{traits::*, types::*}; @@ -102,6 +102,10 @@ impl Context for TestHttp { Some(format!("called {} times", self.n_sync_calls + 1).as_str()), ); } + "define_metrics" => test_define_metrics(self, TestPhase::HTTPCallResponse), + "increment_counters" => test_increment_counters(self, TestPhase::HTTPCallResponse, None), + "toggle_gauges" => test_toggle_gauges(self, TestPhase::HTTPCallResponse, None), + "record_histograms" => test_record_metric(self, TestPhase::HTTPCallResponse), _ => {} } diff --git a/t/lib/proxy-wasm-tests/hostcalls/src/lib.rs b/t/lib/proxy-wasm-tests/hostcalls/src/lib.rs index 783432b53..c394985db 100644 --- a/t/lib/proxy-wasm-tests/hostcalls/src/lib.rs +++ b/t/lib/proxy-wasm-tests/hostcalls/src/lib.rs @@ -6,14 +6,15 @@ mod types; use crate::{tests::*, types::test_http::*, types::test_root::*, types::*}; use log::*; -use proxy_wasm::{traits::*, types::*}; -use std::{collections::HashMap, time::Duration}; +use proxy_wasm::{traits::*, types::*, hostcalls::*}; +use std::{collections::BTreeMap, collections::HashMap, time::Duration}; proxy_wasm::main! 
{{ proxy_wasm::set_log_level(LogLevel::Trace); proxy_wasm::set_root_context(|_| -> Box { Box::new(TestRoot { config: HashMap::new(), + metrics: BTreeMap::new(), n_sync_calls: 0, }) }); @@ -27,11 +28,27 @@ impl RootContext for TestRoot { if let Ok(text) = std::str::from_utf8(&config) { info!("vm config: {}", text); - if text == "do_trap" { - panic!("trap on_vm_start"); - } else if text == "do_false" { - info!("on_vm_start returning false"); - return false; + match text { + "do_trap" => panic!("trap on_vm_start"), + "do_false" => { + info!("on_vm_start returning false"); + return false; + } + "define_metric" => { + let c = "c1_OnVMStart"; + define_metric(MetricType::Counter, c).expect("cannot define new metric"); + } + "increment_metric" => { + let c = "c1_OnVMStart"; + let id = define_metric(MetricType::Counter, c).expect("cannot define new metric"); + increment_metric(id, 1).unwrap(); + } + "record_metric" => { + let g = "g1_OnVMStart"; + let id = define_metric(MetricType::Gauge, g).expect("cannot define new metric"); + record_metric(id, 1).unwrap(); + } + _ => () } } else { info!("cannot parse vm config"); @@ -63,12 +80,23 @@ impl RootContext for TestRoot { )); } - if let Some(on_configure) = self.get_config("on_configure") { - match on_configure { - "do_trap" => panic!("trap on_configure"), - "do_return_false" => return false, - _ => (), + match self.get_config("on_configure").unwrap_or("") { + "do_trap" => panic!("trap on_configure"), + "do_return_false" => return false, + "define_metrics" => test_define_metrics(self, TestPhase::Configure), + "define_and_increment_counters" => { + test_define_metrics(self, TestPhase::Configure); + test_increment_counters(self, TestPhase::Configure, None); + } + "define_and_toggle_gauges" => { + test_define_metrics(self, TestPhase::Configure); + test_toggle_gauges(self, TestPhase::Configure, None); } + "define_and_record_histograms" => { + test_define_metrics(self, TestPhase::Configure); + test_record_metric(self, TestPhase::Configure); + } + _ => (), } true @@ -84,15 +112,34 @@ impl RootContext for TestRoot { self.get_config("tick_period").unwrap() ); + let n_sync_calls = self + .config + .get("n_sync_calls") + .map_or(1, |v| v.parse().expect("bad n_sync_calls value")); + + if self.n_sync_calls >= n_sync_calls { + return; + } + match self.get_config("on_tick").unwrap_or("") { "log_property" => test_log_property(self), + "define_metrics" => { + test_define_metrics(self, TestPhase::Tick); + self.n_sync_calls += 1; + } + "increment_counters" => test_increment_counters(self, TestPhase::Tick, None), + "toggle_gauges" => test_toggle_gauges(self, TestPhase::Tick, None), + "set_gauges" => { + test_record_metric(self, TestPhase::Tick); + self.n_sync_calls += 1; + } + "record_histograms" => { + test_record_metric(self, TestPhase::Tick); + self.n_sync_calls += 1; + } + "log_metrics" => test_log_metrics(self, TestPhase::Tick), "set_property" => test_set_property(self), "dispatch" => { - let n_sync_calls = self - .config - .get("n_sync_calls") - .map_or(1, |v| v.parse().expect("bad n_sync_calls value")); - if self.n_sync_calls >= n_sync_calls { return; } @@ -172,6 +219,7 @@ impl RootContext for TestRoot { Some(Box::new(TestHttp { config: self.config.clone(), on_phases: phases, + metrics: self.metrics.clone(), n_sync_calls: 0, ncalls: 0, })) diff --git a/t/lib/proxy-wasm-tests/hostcalls/src/tests/mod.rs b/t/lib/proxy-wasm-tests/hostcalls/src/tests/mod.rs index 1b98e15bd..74e6e21f4 100644 --- a/t/lib/proxy-wasm-tests/hostcalls/src/tests/mod.rs +++ 
b/t/lib/proxy-wasm-tests/hostcalls/src/tests/mod.rs @@ -123,6 +123,14 @@ pub(crate) fn test_log_properties(ctx: &(dyn TestContext + 'static), phase: Test } } +pub(crate) fn test_log_metrics(ctx: &(dyn TestContext + 'static), phase: TestPhase) { + for (n, id) in ctx.get_metrics_mapping() { + if n.starts_with('h') { continue } + let value = get_metric(*id).unwrap(); + info!("{}: {} at {:?}", n, value, phase) + } +} + fn show_property(ctx: &(dyn TestContext + 'static), path: &[&str]) -> String { if let Some(p) = ctx.get_property(path.to_vec()) { if let Ok(value) = std::str::from_utf8(&p) { @@ -238,7 +246,9 @@ trait HeaderStr { impl HeaderStr for str { fn split_header<'a>(&'a self) -> Option<(&'a str, &'a str)> { if &self[0..1] == ":" { - self[1..].find(':').map(|n| (&self[0..n + 1], &self[n + 2..])) + self[1..] + .find(':') + .map(|n| (&self[0..n + 1], &self[n + 2..])) } else { self.split_once(':') } @@ -414,6 +424,130 @@ pub(crate) fn test_set_shared_data_by_len(ctx: &mut TestHttp) { test_set_shared_data(ctx); } +fn should_run_on_current_worker(ctx: &(dyn TestContext + 'static)) -> bool { + match ctx.get_config("on_worker") { + Some(on_worker) => { + let w_id_bytes = ctx + .get_property(vec![("worker_id")]) + .expect("could not get worker_id"); + let w_id = std::str::from_utf8(&w_id_bytes).expect("bad worker_id value"); + + on_worker == w_id + } + None => true, + } +} + +pub(crate) fn test_define_metrics(ctx: &mut (dyn TestContext + 'static), phase: TestPhase) { + if !should_run_on_current_worker(ctx) { + return; + } + + let name_len = ctx.get_config("metrics_name_len").map_or(0, |x| { + x.parse::().expect("bad metrics_name_len value") + }); + + let metrics_config = ctx + .get_config("metrics") + .map(|x| x.to_string()) + .expect("missing metrics parameter"); + + for metric in metrics_config.split(",") { + let metric_char = metric.chars().nth(0).unwrap(); + let metric_type = match metric_char { + 'c' => MetricType::Counter, + 'g' => MetricType::Gauge, + 'h' => MetricType::Histogram, + _ => panic!("unexpected metric type"), + }; + let n = metric[1..].parse::().expect("bad metrics value"); + + for i in 1..(n + 1) { + let mut name = format!("{}{}_{:?}", metric_char, i, phase); + + if name_len > 0 { + name = format!("{}{}", name, "x".repeat(name_len - name.chars().count())); + } + + let m_id = define_metric(metric_type, &name).expect("cannot define new metric"); + + info!("defined metric {} as {:?} at {:?}", &name, m_id, phase); + + ctx.save_metric_mapping(name.as_str(), m_id); + } + } +} + +pub(crate) fn test_increment_counters(ctx: &(dyn TestContext + 'static), phase: TestPhase, skip_others: Option) { + if !should_run_on_current_worker(ctx) { + return; + } + + let n_increments = ctx + .get_config("n_increments") + .map_or(1, |x| x.parse::().expect("bad n_increments value")); + + for (n, id) in ctx.get_metrics_mapping() { + if skip_others.unwrap_or(true) && !n.starts_with('c') { continue } + + for _ in 0..n_increments { + increment_metric(*id, 1).unwrap(); + } + + info!("incremented {} at {:?}", n, phase); + } + + test_log_metrics(ctx, phase); +} + +pub(crate) fn test_toggle_gauges(ctx: &(dyn TestContext + 'static), phase: TestPhase, skip_others: Option) { + if !should_run_on_current_worker(ctx) { + return; + } + + for (n, id) in ctx.get_metrics_mapping() { + if skip_others.unwrap_or(true) && !n.starts_with('g') { continue } + + let new_value = match get_metric(*id).unwrap() { + 0 => 1, + _ => 0, + }; + + record_metric(*id, new_value).unwrap(); + info!("toggled {} at {:?}", n, phase); + } + + 
test_log_metrics(ctx, phase); +} + +pub(crate) fn test_record_metric(ctx: &(dyn TestContext + 'static), phase: TestPhase) { + if !should_run_on_current_worker(ctx) { + return; + } + + let value = ctx + .get_config("value") + .map_or(1, |x| x.parse::().expect("bad value")); + + for (n, id) in ctx.get_metrics_mapping() { + if n.starts_with('c') { continue } + record_metric(*id, value).unwrap(); + info!("record {} on {} at {:?}", value, n, phase); + } + + test_log_metrics(ctx, phase); +} + +pub(crate) fn test_get_metrics(ctx: &TestHttp) { + for (n, id) in ctx.get_metrics_mapping() { + if n.starts_with('h') { continue } + + let name = n.replace("_", "-"); + let value = get_metric(*id).unwrap().to_string(); + ctx.add_http_response_header(name.as_str(), value.as_str()); + } +} + pub(crate) fn test_shared_queue_enqueue(ctx: &TestHttp) { let queue_id: u32 = ctx .config diff --git a/t/lib/proxy-wasm-tests/hostcalls/src/types/mod.rs b/t/lib/proxy-wasm-tests/hostcalls/src/types/mod.rs index c4f07751e..fc042ecc1 100644 --- a/t/lib/proxy-wasm-tests/hostcalls/src/types/mod.rs +++ b/t/lib/proxy-wasm-tests/hostcalls/src/types/mod.rs @@ -6,16 +6,21 @@ use crate::*; #[derive(Debug, Eq, PartialEq, enum_utils::FromStr)] #[enumeration(rename_all = "snake_case")] pub enum TestPhase { + Configure, + Tick, RequestHeaders, RequestBody, ResponseHeaders, ResponseBody, ResponseTrailers, + HTTPCallResponse, Log, } pub trait TestContext { fn get_config(&self, name: &str) -> Option<&str>; + fn get_metrics_mapping(&self) -> &BTreeMap; + fn save_metric_mapping(&mut self, name: &str, metric_id: u32) -> Option; } impl Context for dyn TestContext {} @@ -24,10 +29,26 @@ impl TestContext for TestRoot { fn get_config(&self, name: &str) -> Option<&str> { self.config.get(name).map(|s| s.as_str()) } + + fn get_metrics_mapping(&self) -> &BTreeMap { + &self.metrics + } + + fn save_metric_mapping(&mut self, name: &str, metric_id: u32) -> Option { + self.metrics.insert(name.to_string(), metric_id) + } } impl TestContext for TestHttp { fn get_config(&self, name: &str) -> Option<&str> { self.config.get(name).map(|s| s.as_str()) } + + fn get_metrics_mapping(&self) -> &BTreeMap { + &self.metrics + } + + fn save_metric_mapping(&mut self, name: &str, metric_id: u32) -> Option { + self.metrics.insert(name.to_string(), metric_id) + } } diff --git a/t/lib/proxy-wasm-tests/hostcalls/src/types/test_http.rs b/t/lib/proxy-wasm-tests/hostcalls/src/types/test_http.rs index 0fda3ce29..bd9ce7ae2 100644 --- a/t/lib/proxy-wasm-tests/hostcalls/src/types/test_http.rs +++ b/t/lib/proxy-wasm-tests/hostcalls/src/types/test_http.rs @@ -1,11 +1,12 @@ use crate::{tests::echo::*, tests::host::*, tests::*, *}; use http::StatusCode; use log::*; -use proxy_wasm::{traits::*, types::*}; +use proxy_wasm::{hostcalls::*, traits::*, types::*}; pub struct TestHttp { pub on_phases: Vec, pub config: HashMap, + pub metrics: BTreeMap, pub n_sync_calls: usize, pub ncalls: usize, } @@ -65,6 +66,7 @@ impl TestHttp { "/t/log/response_body" => test_log_response_body(self), "/t/log/property" => test_log_property(self), "/t/log/properties" => test_log_properties(self, cur_phase), + "/t/log/metrics" => test_log_metrics(self, cur_phase), /* send_local_response */ "/t/send_local_response/status/204" => test_send_status(self, 204), @@ -115,6 +117,27 @@ impl TestHttp { "/t/shm/enqueue" => test_shared_queue_enqueue(self), "/t/shm/dequeue" => test_shared_queue_dequeue(self), + /* metrics */ + "/t/metrics/define" => test_define_metrics(self, cur_phase), + "/t/metrics/increment_counters" => 
test_increment_counters(self, cur_phase, None), + "/t/metrics/increment_gauges" => { + let skip_non_counters = false; + test_increment_counters(self, cur_phase, Some(skip_non_counters)); + } + "/t/metrics/toggle_gauges" => test_toggle_gauges(self, cur_phase, None), + "/t/metrics/toggle_counters" => { + let skip_non_gauges = false; + test_toggle_gauges(self, cur_phase, Some(skip_non_gauges)); + } + "/t/metrics/record_histograms" => test_record_metric(self, cur_phase), + "/t/metrics/get" => test_get_metrics(self), + "/t/metrics/increment_invalid_counter" => increment_metric(0, 1).unwrap(), + "/t/metrics/set_invalid_gauge" => record_metric(0, 1).unwrap(), + "/t/metrics/get_invalid_metric" => { + info!("[hostcalls] retrieving invalid metric in \"{:?}\"", cur_phase); + get_metric(0).unwrap(); + } + /* errors */ "/t/trap" => panic!("custom trap"), "/t/error/get_response_body" => { diff --git a/t/lib/proxy-wasm-tests/hostcalls/src/types/test_root.rs b/t/lib/proxy-wasm-tests/hostcalls/src/types/test_root.rs index 95ad06857..6300c47a2 100644 --- a/t/lib/proxy-wasm-tests/hostcalls/src/types/test_root.rs +++ b/t/lib/proxy-wasm-tests/hostcalls/src/types/test_root.rs @@ -2,5 +2,6 @@ use crate::*; pub struct TestRoot { pub config: HashMap, + pub metrics: BTreeMap, pub n_sync_calls: usize, } diff --git a/util/setup_dev.sh b/util/setup_dev.sh index 02f19fbc2..4b3944401 100755 --- a/util/setup_dev.sh +++ b/util/setup_dev.sh @@ -136,6 +136,30 @@ EOF use_http2 use_http3 env_to_nginx +EOF + patch --forward --ignore-whitespace lib/perl5/Test/Nginx/Util.pm <<'EOF' + @@ -977,6 +977,11 @@ + my $post_main_config = $block->post_main_config; + my $err_log_file = $block->error_log_file; + my $server_name = $block->server_name; + + my $workers = $block->workers; + + + + if (!$workers) { + + $workers = $Workers; + + } + + if ($UseHup) { + master_on(); # config reload is buggy when master is off + @@ -1054,7 +1059,7 @@ + bail_out "Can't open $ConfFile for writing: $!\n"; + print $out "daemon $DaemonEnabled;" if ($DaemonEnabled eq 'off'); + print $out <<_EOC_; + -worker_processes $Workers; + +worker_processes $workers; + master_process $MasterProcessEnabled; + error_log $err_log_file $LogLevel; + pid $PidFile; + EOF set -e popd
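
For quick reference, the `metrics{}` block introduced by this commit is declared inside `wasm{}`. The sketch below is illustrative only: the module path is a placeholder and the values mirror the edge-case tests above rather than the directive defaults.

```nginx
wasm {
    module hostcalls /path/to/hostcalls.wasm;    # any Proxy-Wasm module

    metrics {
        slab_size               5m;     # shared-memory zone holding all metrics
        max_metric_name_length  128;    # longest accepted metric name
    }
}
```

As with the other shm directives, `slab_size` is checked by the shared `ngx_wasm_core_shm_validate_size()` helper added above: it must be page-aligned and at least three pages (12288 bytes with 4 KiB pages). Metric definitions whose names exceed `max_metric_name_length` are rejected with a "metric name too long" trap, as exercised in `t/03-proxy_wasm/hfuncs/metrics/001-define_metric_edge_cases.t`.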
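
On the filter side, the metrics are driven through the standard Proxy-Wasm hostcalls used by the hostcalls test crate in this patch (`define_metric`, `increment_metric`, `record_metric`, `get_metric`). The following is a minimal root-context sketch, not part of the patch: it assumes the same `proxy-wasm` Rust SDK and `log` crate as the test filter, and the metric names, values and tick period are illustrative.

```rust
use log::info;
use proxy_wasm::hostcalls::{define_metric, get_metric, increment_metric, record_metric};
use proxy_wasm::traits::{Context, RootContext};
use proxy_wasm::types::{LogLevel, MetricType};
use std::time::Duration;

struct MetricsRoot {
    counter: u32,
    gauge: u32,
    histogram: u32,
}

impl Context for MetricsRoot {}

impl RootContext for MetricsRoot {
    fn on_configure(&mut self, _plugin_configuration_size: usize) -> bool {
        // Define each metric once; the returned numeric IDs are reused for updates.
        self.counter = define_metric(MetricType::Counter, "requests_total")
            .expect("cannot define counter");
        self.gauge = define_metric(MetricType::Gauge, "in_flight")
            .expect("cannot define gauge");
        self.histogram = define_metric(MetricType::Histogram, "latency_ms")
            .expect("cannot define histogram");

        self.set_tick_period(Duration::from_millis(500));
        true
    }

    fn on_tick(&mut self) {
        increment_metric(self.counter, 1).unwrap(); // counters take deltas
        record_metric(self.gauge, 42).unwrap();     // gauges take absolute values
        record_metric(self.histogram, 7).unwrap();  // histograms take one sample per call

        info!("requests_total = {}", get_metric(self.counter).unwrap());
    }
}

proxy_wasm::main! {{
    proxy_wasm::set_log_level(LogLevel::Info);
    proxy_wasm::set_root_context(|_| -> Box<dyn RootContext> {
        Box::new(MetricsRoot { counter: 0, gauge: 0, histogram: 0 })
    });
}}
```

Note that the operations are paired with the matching metric kinds on purpose: the misuse tests above (`003-increment_metric_misuse.t`, `004-record_metric_misuse.t`) show the host rejecting `increment_metric` on a gauge and `record_metric` on a counter with "operation not supported".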