From ba99b40ac5fcd3d679eb754d391adbf6fcfe2d3f Mon Sep 17 00:00:00 2001 From: Jun Ouyang Date: Thu, 25 Jan 2024 18:08:54 +0800 Subject: [PATCH 01/91] bump(deps): bump ngx_brotli version to master branch --- .requirements | 3 ++- BUILD.bazel | 13 ++++++++++++ build/openresty/BUILD.openresty.bazel | 21 ++++++++++++++----- build/openresty/brotli/BUILD.bazel | 0 .../openresty/brotli/brotli_repositories.bzl | 14 +++++++++++++ build/openresty/repositories.bzl | 2 ++ 6 files changed, 47 insertions(+), 6 deletions(-) create mode 100644 build/openresty/brotli/BUILD.bazel create mode 100644 build/openresty/brotli/brotli_repositories.bzl diff --git a/.requirements b/.requirements index 4dcc4172e79..8e687f97a79 100644 --- a/.requirements +++ b/.requirements @@ -18,4 +18,5 @@ WASMER=3.1.1 WASMTIME=14.0.3 V8=10.5.18 -NGX_BROTLI=25f86f0bac1101b6512135eac5f93c49c63609e3 # v1.0.0rc +NGX_BROTLI=a71f9312c2deb28875acc7bacfdd5695a111aa53 # master branch of Jan 23, 2024 +BROTLI=ed738e842d2fbdf2d6459e39267a633c4a9b2f5d # master branch of brotli deps submodule of Jan 23, 2024 \ No newline at end of file diff --git a/BUILD.bazel b/BUILD.bazel index 2ca22d6d1d5..d777e9ce443 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -123,6 +123,19 @@ config_setting( visibility = ["//visibility:public"], ) +bool_flag( + name = "brotli", + build_setting_default = True, +) + +config_setting( + name = "brotli_flag", + flag_values = { + ":brotli": "true", + }, + visibility = ["//visibility:public"], +) + config_setting( name = "debug_linux_flag", constraint_values = [ diff --git a/build/openresty/BUILD.openresty.bazel b/build/openresty/BUILD.openresty.bazel index 1dd2b0f476b..f840a650a63 100644 --- a/build/openresty/BUILD.openresty.bazel +++ b/build/openresty/BUILD.openresty.bazel @@ -149,6 +149,7 @@ CONFIGURE_OPTIONS = [ "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/pcre/lib\"", "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/openssl/lib\"", "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/luajit/lib\"", + "--with-ld-opt=\"-L$$EXT_BUILD_DEPS$$/lib\"", # Here let's try not having --disable-new-dtags; --disable-new-dtags creates rpath instead of runpath # note rpath can't handle indirect dependency (nginx -> luajit -> dlopen("other")), so each indirect # dependency should have its rpath set (luajit, libxslt etc); on the other side, rpath is not @@ -168,7 +169,6 @@ CONFIGURE_OPTIONS = [ "--add-module=$$EXT_BUILD_ROOT$$/external/lua-kong-nginx-module/stream", "--add-module=$$EXT_BUILD_ROOT$$/external/lua-resty-lmdb", "--add-module=$$EXT_BUILD_ROOT$$/external/lua-resty-events", - "--add-module=$$EXT_BUILD_ROOT$$/external/ngx_brotli", ] + select({ "@kong//:aarch64-linux-anylibc-cross": [ "--crossbuild=Linux:aarch64", @@ -230,6 +230,11 @@ CONFIGURE_OPTIONS = [ "--group=nobody", ], "//conditions:default": [], +}) + select({ + "@kong//:brotli_flag": [ + "--add-module=$$EXT_BUILD_ROOT$$/external/ngx_brotli", + ], + "//conditions:default": [], }) + wasmx_configure_options # TODO: set prefix to populate pid_path, conf_path, log_path etc @@ -259,10 +264,10 @@ configure_make( configure_options = CONFIGURE_OPTIONS, data = [ "@lua-kong-nginx-module//:all_srcs", - "@lua-resty-lmdb//:all_srcs", "@lua-resty-events//:all_srcs", - "@openresty_binding//:all_srcs", + "@lua-resty-lmdb//:all_srcs", "@ngx_brotli//:all_srcs", + "@openresty_binding//:all_srcs", ] + select({ "@kong//:wasmx_flag": [ "@ngx_wasm_module//:all_srcs", @@ -284,9 +289,9 @@ configure_make( ], visibility = ["//visibility:public"], deps = [ - "@pcre", "@openresty//:luajit", - "@openssl//:openssl", + "@openssl", + 
"@pcre", ] + select({ "@kong//:any-cross": [ "@cross_deps_zlib//:zlib", @@ -299,5 +304,11 @@ configure_make( "@cross_deps_libxcrypt//:libxcrypt", ], "//conditions:default": [], + }) + select({ + "@kong//:brotli_flag": [ + "@brotli//:brotlicommon", + "@brotli//:brotlienc", + ], + "//conditions:default": [], }), ) diff --git a/build/openresty/brotli/BUILD.bazel b/build/openresty/brotli/BUILD.bazel new file mode 100644 index 00000000000..e69de29bb2d diff --git a/build/openresty/brotli/brotli_repositories.bzl b/build/openresty/brotli/brotli_repositories.bzl new file mode 100644 index 00000000000..6568fca3c41 --- /dev/null +++ b/build/openresty/brotli/brotli_repositories.bzl @@ -0,0 +1,14 @@ +"""A module defining the dependency """ + +load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") +load("@kong_bindings//:variables.bzl", "KONG_VAR") + +def brotli_repositories(): + maybe( + git_repository, + name = "brotli", + branch = KONG_VAR["BROTLI"], + remote = "https://github.com/google/brotli", + visibility = ["//visibility:public"], # let this to be referenced by openresty build + ) diff --git a/build/openresty/repositories.bzl b/build/openresty/repositories.bzl index 98e40eb491a..0d5434f9450 100644 --- a/build/openresty/repositories.bzl +++ b/build/openresty/repositories.bzl @@ -8,6 +8,7 @@ load("//build/openresty/pcre:pcre_repositories.bzl", "pcre_repositories") load("//build/openresty/openssl:openssl_repositories.bzl", "openssl_repositories") load("//build/openresty/atc_router:atc_router_repositories.bzl", "atc_router_repositories") load("//build/openresty/wasmx:wasmx_repositories.bzl", "wasmx_repositories") +load("//build/openresty/brotli:brotli_repositories.bzl", "brotli_repositories") # This is a dummy file to export the module's repository. 
_NGINX_MODULE_DUMMY_FILE = """ @@ -23,6 +24,7 @@ def openresty_repositories(): openssl_repositories() atc_router_repositories() wasmx_repositories() + brotli_repositories() openresty_version = KONG_VAR["OPENRESTY"] From 15aa0e4b32fecb1aeaefef8377272e328d2d5095 Mon Sep 17 00:00:00 2001 From: Jun Ouyang Date: Mon, 29 Jan 2024 16:04:20 +0800 Subject: [PATCH 02/91] chore(deps): disabled ngx_brotli on rhel7 rhel9-arm64 amazonlinux-2023-arm64 due to toolchain issues --- .github/matrix-full.yml | 6 +++--- changelog/unreleased/kong/bump_ngx_brotli.yml | 3 +++ .../explain_manifest/fixtures/amazonlinux-2023-arm64.txt | 1 - scripts/explain_manifest/fixtures/el7-amd64.txt | 1 - scripts/explain_manifest/fixtures/el9-arm64.txt | 1 - 5 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 changelog/unreleased/kong/bump_ngx_brotli.yml diff --git a/.github/matrix-full.yml b/.github/matrix-full.yml index 70b4787491e..b011607f4c8 100644 --- a/.github/matrix-full.yml +++ b/.github/matrix-full.yml @@ -38,7 +38,7 @@ build-packages: image: centos:7 package: rpm package-type: el7 - bazel-args: --//:wasmx_el7_workaround=true + bazel-args: --//:wasmx_el7_workaround=true --//:brotli=False check-manifest-suite: el7-amd64 - label: rhel-8 image: rockylinux:8 @@ -53,7 +53,7 @@ build-packages: - label: rhel-9-arm64 package: rpm package-type: el9 - bazel-args: --platforms=//:rhel9-crossbuild-aarch64 + bazel-args: --platforms=//:rhel9-crossbuild-aarch64 --//:brotli=False check-manifest-suite: el9-arm64 # Amazon Linux @@ -70,7 +70,7 @@ build-packages: - label: amazonlinux-2023-arm64 package: rpm package-type: aws2023 - bazel-args: --platforms=//:aws2023-crossbuild-aarch64 + bazel-args: --platforms=//:aws2023-crossbuild-aarch64 --//:brotli=False check-manifest-suite: amazonlinux-2023-arm64 build-images: diff --git a/changelog/unreleased/kong/bump_ngx_brotli.yml b/changelog/unreleased/kong/bump_ngx_brotli.yml new file mode 100644 index 00000000000..7c05da00c79 --- /dev/null +++ b/changelog/unreleased/kong/bump_ngx_brotli.yml @@ -0,0 +1,3 @@ +message: Bumped ngx_brotli to master branch, and disabled it on rhel7 rhel9-arm64 and amazonlinux-2023-arm64 due to toolchain issues +type: dependency +scope: Core diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt index a9f1b4faf91..48576d505f1 100644 --- a/scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt +++ b/scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt @@ -169,7 +169,6 @@ - lua-kong-nginx-module/stream - lua-resty-events - lua-resty-lmdb - - ngx_brotli - ngx_wasm_module OpenSSL : OpenSSL 3.2.0 23 Nov 2023 DWARF : True diff --git a/scripts/explain_manifest/fixtures/el7-amd64.txt b/scripts/explain_manifest/fixtures/el7-amd64.txt index 34190b2b924..b0d0b772ff0 100644 --- a/scripts/explain_manifest/fixtures/el7-amd64.txt +++ b/scripts/explain_manifest/fixtures/el7-amd64.txt @@ -201,7 +201,6 @@ - lua-kong-nginx-module/stream - lua-resty-events - lua-resty-lmdb - - ngx_brotli - ngx_wasm_module OpenSSL : OpenSSL 3.2.0 23 Nov 2023 DWARF : True diff --git a/scripts/explain_manifest/fixtures/el9-arm64.txt b/scripts/explain_manifest/fixtures/el9-arm64.txt index a9f1b4faf91..48576d505f1 100644 --- a/scripts/explain_manifest/fixtures/el9-arm64.txt +++ b/scripts/explain_manifest/fixtures/el9-arm64.txt @@ -169,7 +169,6 @@ - lua-kong-nginx-module/stream - lua-resty-events - lua-resty-lmdb - - ngx_brotli - ngx_wasm_module OpenSSL : OpenSSL 3.2.0 23 Nov 2023 DWARF : True From 
cb1a3c1c0b6b9b7d1b0314d77e3b147865475de1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Mon, 29 Jan 2024 14:46:49 +0100 Subject: [PATCH 03/91] fix(redis): add default port for standardized redis config The default port should be 6379. This was how RateLimiting and Response-RateLimiting worked before redis config standardization. KAG-3618 --- kong/plugins/acme/schema.lua | 8 ++ kong/tools/redis/schema.lua | 7 +- .../30-standardized_redis_config_spec.lua | 106 ++++++++++++++++++ spec/02-integration/02-cmd/11-config_spec.lua | 24 +++- spec/03-plugins/29-acme/04-schema_spec.lua | 34 ++++++ .../kong/plugins/redis-dummy/handler.lua | 12 ++ .../kong/plugins/redis-dummy/schema.lua | 15 +++ 7 files changed, 198 insertions(+), 8 deletions(-) create mode 100644 spec/01-unit/30-standardized_redis_config_spec.lua create mode 100644 spec/fixtures/custom_plugins/kong/plugins/redis-dummy/handler.lua create mode 100644 spec/fixtures/custom_plugins/kong/plugins/redis-dummy/schema.lua diff --git a/kong/plugins/acme/schema.lua b/kong/plugins/acme/schema.lua index 276ec19317f..37a4bb99efd 100644 --- a/kong/plugins/acme/schema.lua +++ b/kong/plugins/acme/schema.lua @@ -271,6 +271,14 @@ local schema = { then_err = "terms of service must be accepted, see https://letsencrypt.org/repository/", } }, + { conditional = { + if_field = "config.storage", if_match = { eq = "redis" }, + then_field = "config.storage_config.redis.host", then_match = { required = true }, + } }, + { conditional = { + if_field = "config.storage", if_match = { eq = "redis" }, + then_field = "config.storage_config.redis.port", then_match = { required = true }, + } }, { custom_entity_check = { field_sources = { "config.storage", }, diff --git a/kong/tools/redis/schema.lua b/kong/tools/redis/schema.lua index 39f2c19b06d..8982698719b 100644 --- a/kong/tools/redis/schema.lua +++ b/kong/tools/redis/schema.lua @@ -7,7 +7,7 @@ return { description = "Redis configuration", fields = { { host = typedefs.host }, - { port = typedefs.port }, + { port = typedefs.port({ default = 6379 }), }, { timeout = typedefs.timeout { default = DEFAULT_TIMEOUT } }, { username = { description = "Username to use for Redis connections. If undefined, ACL authentication won't be performed. This requires Redis v6.0.0+. 
To be compatible with Redis v5.x.y, you can set it to `default`.", type = "string", referenceable = true @@ -31,9 +31,6 @@ return { default = false } }, { server_name = typedefs.sni { required = false } } - }, - entity_checks = { - { mutually_required = { "host", "port" }, }, - }, + } } } diff --git a/spec/01-unit/30-standardized_redis_config_spec.lua b/spec/01-unit/30-standardized_redis_config_spec.lua new file mode 100644 index 00000000000..3f2c5894fc4 --- /dev/null +++ b/spec/01-unit/30-standardized_redis_config_spec.lua @@ -0,0 +1,106 @@ +local schema_def = require "spec.fixtures.custom_plugins.kong.plugins.redis-dummy.schema" +local v = require("spec.helpers").validate_plugin_config_schema + + +describe("Validate standardized redis config schema", function() + describe("valid config", function() + it("accepts minimal redis config (populates defaults)", function() + local config = { + redis = { + host = "localhost" + } + } + local ok, err = v(config, schema_def) + assert.truthy(ok) + assert.same({ + host = "localhost", + port = 6379, + timeout = 2000, + username = ngx.null, + password = ngx.null, + database = 0, + ssl = false, + ssl_verify = false, + server_name = ngx.null, + }, ok.config.redis) + assert.is_nil(err) + end) + + it("full redis config", function() + local config = { + redis = { + host = "localhost", + port = 9900, + timeout = 3333, + username = "test", + password = "testXXX", + database = 5, + ssl = true, + ssl_verify = true, + server_name = "example.test" + } + } + local ok, err = v(config, schema_def) + assert.truthy(ok) + assert.same(config.redis, ok.config.redis) + assert.is_nil(err) + end) + + it("allows empty strings on password", function() + local config = { + redis = { + host = "localhost", + password = "", + } + } + local ok, err = v(config, schema_def) + assert.truthy(ok) + assert.same({ + host = "localhost", + port = 6379, + timeout = 2000, + username = ngx.null, + password = "", + database = 0, + ssl = false, + ssl_verify = false, + server_name = ngx.null, + }, ok.config.redis) + assert.is_nil(err) + end) + end) + + describe("invalid config", function() + it("rejects invalid config", function() + local config = { + redis = { + host = "", + port = -5, + timeout = -5, + username = 1, + password = 4, + database = "abc", + ssl = "abc", + ssl_verify = "xyz", + server_name = "test-test" + } + } + local ok, err = v(config, schema_def) + assert.falsy(ok) + assert.same({ + config = { + redis = { + database = 'expected an integer', + host = 'length must be at least 1', + password = 'expected a string', + port = 'value should be between 0 and 65535', + ssl = 'expected a boolean', + ssl_verify = 'expected a boolean', + timeout = 'value should be between 0 and 2147483646', + username = 'expected a string', + } + } + }, err) + end) + end) +end) diff --git a/spec/02-integration/02-cmd/11-config_spec.lua b/spec/02-integration/02-cmd/11-config_spec.lua index 4096b2189bc..0a32456f26a 100644 --- a/spec/02-integration/02-cmd/11-config_spec.lua +++ b/spec/02-integration/02-cmd/11-config_spec.lua @@ -81,6 +81,12 @@ describe("kong config", function() config: port: 10000 host: 127.0.0.1 + - name: rate-limiting + config: + minute: 200 + policy: redis + redis: + host: 127.0.0.1 plugins: - name: correlation-id id: 467f719f-a544-4a8f-bc4b-7cd12913a9d4 @@ -130,7 +136,7 @@ describe("kong config", function() local res = client:get("/services/bar/plugins") local body = assert.res_status(200, res) local json = cjson.decode(body) - assert.equals(2, #json.data) + assert.equals(3, #json.data) 
local res = client:get("/plugins/467f719f-a544-4a8f-bc4b-7cd12913a9d4") local body = assert.res_status(200, res) @@ -532,7 +538,17 @@ describe("kong config", function() local service2 = bp.services:insert({ name = "service2" }, { nulls = true }) local route2 = bp.routes:insert({ service = service2, methods = { "GET" }, name = "b" }, { nulls = true }) - local plugin3 = bp.tcp_log_plugins:insert({ + local plugin3 = bp.rate_limiting_plugins:insert({ + service = service2, + config = { + minute = 100, + policy = "redis", + redis = { + host = "localhost" + } + } + }, { nulls = true }) + local plugin4 = bp.tcp_log_plugins:insert({ service = service2, }, { nulls = true }) local consumer = bp.consumers:insert(nil, { nulls = true }) @@ -603,7 +619,7 @@ describe("kong config", function() assert.equals(route2.name, yaml.routes[2].name) assert.equals(service2.id, yaml.routes[2].service) - assert.equals(3, #yaml.plugins) + assert.equals(4, #yaml.plugins) table.sort(yaml.plugins, sort_by_name) assert.equals(plugin1.id, yaml.plugins[1].id) assert.equals(plugin1.name, yaml.plugins[1].name) @@ -615,6 +631,8 @@ describe("kong config", function() assert.equals(plugin3.id, yaml.plugins[3].id) assert.equals(plugin3.name, yaml.plugins[3].name) + assert.equals(plugin4.id, yaml.plugins[4].id) + assert.equals(plugin4.name, yaml.plugins[4].name) assert.equals(service2.id, yaml.plugins[3].service) assert.equals(1, #yaml.consumers) diff --git a/spec/03-plugins/29-acme/04-schema_spec.lua b/spec/03-plugins/29-acme/04-schema_spec.lua index 2bea9f9b01f..e6a5450361c 100644 --- a/spec/03-plugins/29-acme/04-schema_spec.lua +++ b/spec/03-plugins/29-acme/04-schema_spec.lua @@ -89,6 +89,40 @@ describe("Plugin: acme (schema)", function() } }, }, + ---------------------------------------- + { + name = "accepts valid redis config", + input = { + account_email = "example@example.com", + storage = "redis", + storage_config = { + redis = { + host = "localhost" + }, + } + }, + }, + ---------------------------------------- + { + name = "rejects invalid redis config", + input = { + account_email = "example@example.com", + storage = "redis", + storage_config = { + redis = { }, + } + }, + error = { + ["@entity"] = { "failed conditional validation given value of field 'config.storage'" }, + config = { + storage_config = { + redis = { + host = "required field missing", + } + } + }, + }, + }, } for _, t in ipairs(tests) do diff --git a/spec/fixtures/custom_plugins/kong/plugins/redis-dummy/handler.lua b/spec/fixtures/custom_plugins/kong/plugins/redis-dummy/handler.lua new file mode 100644 index 00000000000..8e13350051b --- /dev/null +++ b/spec/fixtures/custom_plugins/kong/plugins/redis-dummy/handler.lua @@ -0,0 +1,12 @@ +local kong = kong + +local RedisDummy = { + PRIORITY = 1000, + VERSION = "0.1.0", +} + +function RedisDummy:access(conf) + kong.log("access phase") +end + +return RedisDummy diff --git a/spec/fixtures/custom_plugins/kong/plugins/redis-dummy/schema.lua b/spec/fixtures/custom_plugins/kong/plugins/redis-dummy/schema.lua new file mode 100644 index 00000000000..7740f95064d --- /dev/null +++ b/spec/fixtures/custom_plugins/kong/plugins/redis-dummy/schema.lua @@ -0,0 +1,15 @@ +local redis_schema = require "kong.tools.redis.schema" + +return { + name = "redis-dummy", + fields = { + { + config = { + type = "record", + fields = { + { redis = redis_schema.config_schema }, + }, + }, + }, + }, +} From 4d03ca45edfa54c76e5d2d4271892f37b7524ddd Mon Sep 17 00:00:00 2001 From: Michael Martin Date: Mon, 29 Jan 2024 15:53:23 -0600 Subject: [PATCH 
04/91] fix(wasm): do not call attach() on re-entrancy (#12402) --- changelog/unreleased/kong/wasm-attach.yml | 5 +++++ kong/runloop/wasm.lua | 25 ++++++++++++++--------- 2 files changed, 20 insertions(+), 10 deletions(-) create mode 100644 changelog/unreleased/kong/wasm-attach.yml diff --git a/changelog/unreleased/kong/wasm-attach.yml b/changelog/unreleased/kong/wasm-attach.yml new file mode 100644 index 00000000000..99ae358d401 --- /dev/null +++ b/changelog/unreleased/kong/wasm-attach.yml @@ -0,0 +1,5 @@ +message: > + **proxy-wasm**: Fixed "previous plan already attached" error thrown when a + filter triggers re-entrancy of the access handler. +type: bugfix +scope: Core diff --git a/kong/runloop/wasm.lua b/kong/runloop/wasm.lua index 70f36b798ad..9bb697cdda1 100644 --- a/kong/runloop/wasm.lua +++ b/kong/runloop/wasm.lua @@ -922,17 +922,22 @@ function _M.attach(ctx) ctx.ran_wasm = true - local ok, err = proxy_wasm.attach(chain.c_plan) - if not ok then - log(CRIT, "failed attaching ", chain.label, " filter chain to request: ", err) - return kong.response.error(500) - end + local ok, err + if not ctx.wasm_attached then + ctx.wasm_attached = true - ok, err = proxy_wasm.set_host_properties_handlers(properties.get, - properties.set) - if not ok then - log(CRIT, "failed setting host property handlers: ", err) - return kong.response.error(500) + ok, err = proxy_wasm.attach(chain.c_plan) + if not ok then + log(CRIT, "failed attaching ", chain.label, " filter chain to request: ", err) + return kong.response.error(500) + end + + ok, err = proxy_wasm.set_host_properties_handlers(properties.get, + properties.set) + if not ok then + log(CRIT, "failed setting host property handlers: ", err) + return kong.response.error(500) + end end jit.off(proxy_wasm.start) From 60ea714e124ec81bef97031b9d334febcfa9303b Mon Sep 17 00:00:00 2001 From: Makito Date: Tue, 30 Jan 2024 15:16:25 +0800 Subject: [PATCH 05/91] fix(plugins): consistent error responses upon Admin API auth failures (#12429) * fix(plugins): consistent error responses upon Admin API auth failures * fix(basic-auth): update error message --- .../kong/enhance_admin_api_auth_error_response.yml | 3 +++ kong/plugins/basic-auth/access.lua | 4 ++-- kong/plugins/key-auth/handler.lua | 2 +- kong/plugins/ldap-auth/access.lua | 2 +- spec/02-integration/02-cmd/03-reload_spec.lua | 2 +- spec/03-plugins/09-key-auth/02-access_spec.lua | 10 +++++----- spec/03-plugins/10-basic-auth/03-access_spec.lua | 10 +++++----- spec/03-plugins/10-basic-auth/05-declarative_spec.lua | 2 +- spec/03-plugins/20-ldap-auth/01-access_spec.lua | 6 +++--- 9 files changed, 22 insertions(+), 19 deletions(-) create mode 100644 changelog/unreleased/kong/enhance_admin_api_auth_error_response.yml diff --git a/changelog/unreleased/kong/enhance_admin_api_auth_error_response.yml b/changelog/unreleased/kong/enhance_admin_api_auth_error_response.yml new file mode 100644 index 00000000000..fb508af5573 --- /dev/null +++ b/changelog/unreleased/kong/enhance_admin_api_auth_error_response.yml @@ -0,0 +1,3 @@ +message: "Enhance error responses for authentication failures in the Admin API" +type: bugfix +scope: Plugin diff --git a/kong/plugins/basic-auth/access.lua b/kong/plugins/basic-auth/access.lua index 43fec7990cc..cd229709865 100644 --- a/kong/plugins/basic-auth/access.lua +++ b/kong/plugins/basic-auth/access.lua @@ -176,12 +176,12 @@ local function do_authentication(conf) if given_username and given_password then credential = load_credential_from_db(given_username) else - return false, 
unauthorized("Invalid authentication credentials", www_authenticate) + return false, unauthorized("Unauthorized", www_authenticate) end end if not credential or not validate_credentials(credential, given_password) then - return false, unauthorized("Invalid authentication credentials", www_authenticate) + return false, unauthorized("Unauthorized", www_authenticate) end -- Retrieve consumer diff --git a/kong/plugins/key-auth/handler.lua b/kong/plugins/key-auth/handler.lua index 0c711cca133..81b2e309a4f 100644 --- a/kong/plugins/key-auth/handler.lua +++ b/kong/plugins/key-auth/handler.lua @@ -30,7 +30,7 @@ local _realm = 'Key realm="' .. _KONG._NAME .. '"' local ERR_DUPLICATE_API_KEY = { status = 401, message = "Duplicate API key found" } local ERR_NO_API_KEY = { status = 401, message = "No API key found in request" } -local ERR_INVALID_AUTH_CRED = { status = 401, message = "Invalid authentication credentials" } +local ERR_INVALID_AUTH_CRED = { status = 401, message = "Unauthorized" } local ERR_INVALID_PLUGIN_CONF = { status = 500, message = "Invalid plugin configuration" } local ERR_UNEXPECTED = { status = 500, message = "An unexpected error occurred" } diff --git a/kong/plugins/ldap-auth/access.lua b/kong/plugins/ldap-auth/access.lua index 8ece16c9892..fd79e6f2dcc 100644 --- a/kong/plugins/ldap-auth/access.lua +++ b/kong/plugins/ldap-auth/access.lua @@ -263,7 +263,7 @@ local function do_authentication(conf) end if not is_authorized then - return false, {status = 401, message = "Invalid authentication credentials" } + return false, {status = 401, message = "Unauthorized" } end if conf.hide_credentials then diff --git a/spec/02-integration/02-cmd/03-reload_spec.lua b/spec/02-integration/02-cmd/03-reload_spec.lua index e70c84c97d4..2c6464304f6 100644 --- a/spec/02-integration/02-cmd/03-reload_spec.lua +++ b/spec/02-integration/02-cmd/03-reload_spec.lua @@ -697,7 +697,7 @@ describe("key-auth plugin invalidation on dbless reload #off", function() }) local body = res:read_body() proxy_client:close() - return body ~= [[{"message":"Invalid authentication credentials"}]] + return body ~= [[{"message":"Unauthorized"}]] end, 5) admin_client = assert(helpers.admin_client()) diff --git a/spec/03-plugins/09-key-auth/02-access_spec.lua b/spec/03-plugins/09-key-auth/02-access_spec.lua index c75904f057f..4830ab8ce4d 100644 --- a/spec/03-plugins/09-key-auth/02-access_spec.lua +++ b/spec/03-plugins/09-key-auth/02-access_spec.lua @@ -291,7 +291,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) end) it("handles duplicated key in querystring", function() local res = assert(proxy_client:send { @@ -365,7 +365,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) end) -- lua-multipart doesn't currently handle duplicates at all. 
@@ -461,7 +461,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) end) end) @@ -521,7 +521,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) res = assert(proxy_client:send { method = "GET", @@ -534,7 +534,7 @@ for _, strategy in helpers.each_strategy() do body = assert.res_status(401, res) json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) end) end) diff --git a/spec/03-plugins/10-basic-auth/03-access_spec.lua b/spec/03-plugins/10-basic-auth/03-access_spec.lua index 8a6c76014d0..1193c85de01 100644 --- a/spec/03-plugins/10-basic-auth/03-access_spec.lua +++ b/spec/03-plugins/10-basic-auth/03-access_spec.lua @@ -184,7 +184,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) assert.equal('Basic realm="test-realm"', res.headers["WWW-Authenticate"]) end) @@ -200,7 +200,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) assert.equal('Basic realm="test-realm"', res.headers["WWW-Authenticate"]) end) @@ -216,7 +216,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) assert.equal('Basic realm="test-realm"', res.headers["WWW-Authenticate"]) end) @@ -232,7 +232,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) assert.equal('Basic realm="test-realm"', res.headers["WWW-Authenticate"]) end) @@ -308,7 +308,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) assert.equal('Basic realm="test-realm"', res.headers["WWW-Authenticate"]) end) diff --git a/spec/03-plugins/10-basic-auth/05-declarative_spec.lua b/spec/03-plugins/10-basic-auth/05-declarative_spec.lua index db93e1fe376..7ee4d8becc6 100644 --- a/spec/03-plugins/10-basic-auth/05-declarative_spec.lua +++ b/spec/03-plugins/10-basic-auth/05-declarative_spec.lua @@ -179,7 +179,7 @@ for _, strategy in helpers.each_strategy() do local body = assert.res_status(401, res) local json = cjson.decode(body) assert.not_nil(json) - assert.matches("Invalid authentication credentials", json.message) + assert.matches("Unauthorized", json.message) end) end) diff --git a/spec/03-plugins/20-ldap-auth/01-access_spec.lua 
b/spec/03-plugins/20-ldap-auth/01-access_spec.lua index c4f4f259f23..f0aa66e60ad 100644 --- a/spec/03-plugins/20-ldap-auth/01-access_spec.lua +++ b/spec/03-plugins/20-ldap-auth/01-access_spec.lua @@ -237,7 +237,7 @@ for _, ldap_strategy in pairs(ldap_strategies) do }) assert.response(res).has.status(401) local json = assert.response(res).has.jsonbody() - assert.equal("Invalid authentication credentials", json.message) + assert.equal("Unauthorized", json.message) end) it("returns 'invalid credentials' when credential value is in wrong format in proxy-authorization header", function() local res = assert(proxy_client:send { @@ -250,7 +250,7 @@ for _, ldap_strategy in pairs(ldap_strategies) do }) assert.response(res).has.status(401) local json = assert.response(res).has.jsonbody() - assert.equal("Invalid authentication credentials", json.message) + assert.equal("Unauthorized", json.message) end) it("returns 'invalid credentials' when credential value is missing in authorization header", function() local res = assert(proxy_client:send { @@ -263,7 +263,7 @@ for _, ldap_strategy in pairs(ldap_strategies) do }) assert.response(res).has.status(401) local json = assert.response(res).has.jsonbody() - assert.equal("Invalid authentication credentials", json.message) + assert.equal("Unauthorized", json.message) end) it("passes if credential is valid in post request", function() local res = assert(proxy_client:send { From 30096f34be259985f09fc15009b3da82da1f9e8c Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Tue, 30 Jan 2024 10:53:22 +0200 Subject: [PATCH 06/91] perf(router): use static functions for callbacks (#12448) * perf(router): use static functions for callbacks Signed-off-by: Aapo Talvensaari * tuning some code * style clean * style clean * style clean --------- Signed-off-by: Aapo Talvensaari Co-authored-by: chronolaw --- kong/router/fields.lua | 145 +++++++++++++++++++++-------------------- 1 file changed, 73 insertions(+), 72 deletions(-) diff --git a/kong/router/fields.lua b/kong/router/fields.lua index f1e1a537a82..8bcdd7fbcb7 100644 --- a/kong/router/fields.lua +++ b/kong/router/fields.lua @@ -365,6 +365,72 @@ if is_http then end -- is_http +local function visit_for_cache_key(field, value, str_buf) + -- these fields were not in cache key + if field == "net.protocol" then + return true + end + + local headers_or_queries = field:sub(1, PREFIX_LEN) + + if headers_or_queries == HTTP_HEADERS_PREFIX then + headers_or_queries = true + field = replace_dashes_lower(field) + + elseif headers_or_queries == HTTP_QUERIES_PREFIX then + headers_or_queries = true + + else + headers_or_queries = false + end + + if not headers_or_queries then + str_buf:put(value or "", "|") + + else -- headers or queries + if type(value) == "table" then + tb_sort(value) + value = tb_concat(value, ",") + end + + str_buf:putf("%s=%s|", field, value or "") + end + + return true +end + + +local function visit_for_context(field, value, ctx) + local prefix = field:sub(1, PREFIX_LEN) + + if prefix == HTTP_HEADERS_PREFIX or prefix == HTTP_QUERIES_PREFIX then + local v_type = type(value) + + -- multiple values for a single query parameter, like /?foo=bar&foo=baz + if v_type == "table" then + for _, v in ipairs(value) do + local res, err = ctx:add_value(field, v) + if not res then + return nil, err + end + end + + return true + end -- if v_type + + -- the query parameter has only one value, like /?foo=bar + -- the query parameter has no value, like /?foo, + -- get_uri_arg will get a boolean `true` + -- we think it is equivalent 
to /?foo= + if v_type == "boolean" then + value = "" + end + end + + return ctx:add_value(field, value) +end + + local _M = {} local _MT = { __index = _M, } @@ -391,11 +457,11 @@ function _M:get_value(field, params, ctx) end -function _M:fields_visitor(params, ctx, cb) +function _M:fields_visitor(params, ctx, cb, cb_arg) for _, field in ipairs(self.fields) do local value = self:get_value(field, params, ctx) - local res, err = cb(field, value) + local res, err = cb(field, value, cb_arg) if not res then return nil, err end @@ -412,82 +478,17 @@ local str_buf = buffer.new(64) function _M:get_cache_key(params, ctx) str_buf:reset() - local res = - self:fields_visitor(params, ctx, function(field, value) - - -- these fields were not in cache key - if field == "net.protocol" then - return true - end - - local headers_or_queries = field:sub(1, PREFIX_LEN) - - if headers_or_queries == HTTP_HEADERS_PREFIX then - headers_or_queries = true - field = replace_dashes_lower(field) - - elseif headers_or_queries == HTTP_QUERIES_PREFIX then - headers_or_queries = true - - else - headers_or_queries = false - end - - if not headers_or_queries then - str_buf:put(value or ""):put("|") - - else -- headers or queries - if type(value) == "table" then - tb_sort(value) - value = tb_concat(value, ",") - end - - str_buf:putf("%s=%s|", field, value or "") - end - - return true - end) -- fields_visitor - + local res = self:fields_visitor(params, ctx, + visit_for_cache_key, str_buf) assert(res) return str_buf:get() end -function _M:fill_atc_context(context, params) - local c = context - - local res, err = - self:fields_visitor(params, nil, function(field, value) - - local prefix = field:sub(1, PREFIX_LEN) - - if prefix == HTTP_HEADERS_PREFIX or prefix == HTTP_QUERIES_PREFIX then - local v_type = type(value) - - -- multiple values for a single query parameter, like /?foo=bar&foo=baz - if v_type == "table" then - for _, v in ipairs(value) do - local res, err = c:add_value(field, v) - if not res then - return nil, err - end - end - - return true - end -- if v_type - - -- the query parameter has only one value, like /?foo=bar - -- the query parameter has no value, like /?foo, - -- get_uri_arg will get a boolean `true` - -- we think it is equivalent to /?foo= - if v_type == "boolean" then - value = "" - end - end - - return c:add_value(field, value) - end) -- fields_visitor +function _M:fill_atc_context(c, params) + local res, err = self:fields_visitor(params, nil, + visit_for_context, c) if not res then return nil, err From b7d50b01f4c69cbf7bbad5329f6d9947de61045b Mon Sep 17 00:00:00 2001 From: Achiel van der Mandele Date: Tue, 30 Jan 2024 03:38:07 -0600 Subject: [PATCH 07/91] docs(readme/license): update copyright date to 2024 (#12393) --- LICENSE | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LICENSE b/LICENSE index 2b684dabecd..3e39934b23a 100644 --- a/LICENSE +++ b/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016-2023 Kong Inc. + Copyright 2016-2024 Kong Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index d215c8469b5..e982fd6c5f1 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,7 @@ Kong Inc. offers commercial subscriptions that enhance the Kong API Gateway in a ## License ``` -Copyright 2016-2023 Kong Inc. +Copyright 2016-2024 Kong Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From cc2551610f0fdf0e3e86ddf8533a430ab5ad935e Mon Sep 17 00:00:00 2001 From: Hisham Muhammad Date: Tue, 30 Jan 2024 21:19:43 +0000 Subject: [PATCH 08/91] fix(deps): enable JIT support for pcre2 (#12464) PCRE2 requires JIT support to be explicitly enabled during build. From https://pcre.org/current/doc/html/pcre2jit.html: "JIT support is an optional feature of PCRE2. The "configure" option --enable-jit (or equivalent CMake option) must be set when PCRE2 is built if you want to use JIT." Without the flag in this commit, Kong logs display several entries containing failures in `pcre2_jit_compile`, such as ``` 2024/01/30 16:25:20 [info] 747309#0: pcre2_jit_compile() failed: -45 in "^\s*HTTP/1\.1\s+", ignored ``` --- build/openresty/pcre/BUILD.pcre.bazel | 1 + 1 file changed, 1 insertion(+) diff --git a/build/openresty/pcre/BUILD.pcre.bazel b/build/openresty/pcre/BUILD.pcre.bazel index 6e9658d9371..023f08b3a44 100644 --- a/build/openresty/pcre/BUILD.pcre.bazel +++ b/build/openresty/pcre/BUILD.pcre.bazel @@ -18,6 +18,7 @@ cmake( ], cache_entries = { "CMAKE_C_FLAGS": "${CMAKE_C_FLAGS:-} -fPIC", + "PCRE2_SUPPORT_JIT": "ON", # enable JIT support for pcre2_jit_compile "PCRE2_BUILD_PCRE2GREP": "OFF", # we don't need the cli binary "PCRE2_BUILD_TESTS": "OFF", # test doesn't compile on aarch64-linux-gnu (cross) "CMAKE_INSTALL_LIBDIR": "lib", # force distros that uses lib64 (rhel family) to use lib From 415ca0f0e2db5ff7e88da5bf90273558b324831e Mon Sep 17 00:00:00 2001 From: tzssangglass Date: Wed, 31 Jan 2024 17:24:50 +0800 Subject: [PATCH 09/91] fix(balancer): ensure the `notify` callback is invoked only if defined when handling cached connection errors (#12468) * fix(balancer): ensure the `notify` callback is invoked only if defined when handling cached connection errors address comments of https://github.com/Kong/kong/pull/12346 Signed-off-by: tzssangglass * fix Signed-off-by: tzssangglass --------- Signed-off-by: tzssangglass --- ...ua-0.10.26_01-dyn_upstream_keepalive.patch | 207 +++++++++--------- 1 file changed, 105 insertions(+), 102 deletions(-) diff --git a/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch b/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch index da5d5bde460..4cbfa021505 100644 --- a/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch +++ b/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch @@ -1,36 +1,39 @@ diff --git a/bundle/nginx-1.25.3/src/http/ngx_http_upstream.c b/bundle/nginx-1.25.3/src/http/ngx_http_upstream.c -index 2be233c..5ad6340 100644 +index f364448..a3539e6 100644 --- a/bundle/nginx-1.25.3/src/http/ngx_http_upstream.c +++ b/bundle/nginx-1.25.3/src/http/ngx_http_upstream.c -@@ -4383,6 +4383,7 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u, +@@ -4383,6 +4383,10 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u, if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) { /* TODO: inform balancer instead */ u->peer.tries++; -+ u->peer.notify(&u->peer, u->peer.data, NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR); ++ if (u->peer.notify) { ++ u->peer.notify(&u->peer, u->peer.data, ++ NGX_HTTP_UPSTREAM_NOTIFY_CACHED_CONNECTION_ERROR); ++ } } - + switch (ft_type) { diff --git a/bundle/nginx-1.25.3/src/http/ngx_http_upstream.h b/bundle/nginx-1.25.3/src/http/ngx_http_upstream.h -index 15a35d9..c4209f4 100644 
+index 15a35d9..51bad6b 100644 --- a/bundle/nginx-1.25.3/src/http/ngx_http_upstream.h +++ b/bundle/nginx-1.25.3/src/http/ngx_http_upstream.h @@ -56,6 +56,8 @@ #define NGX_HTTP_UPSTREAM_IGN_VARY 0x00000200 - - -+#define NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR 0x1 + + ++#define NGX_HTTP_UPSTREAM_NOTIFY_CACHED_CONNECTION_ERROR 0x1 + typedef struct { ngx_uint_t status; ngx_msec_t response_time; diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_balancer.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_balancer.c -index af4da73..99d073a 100644 +index af4da73..e10861c 100644 --- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_balancer.c +++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_balancer.c @@ -16,46 +16,106 @@ #include "ngx_http_lua_directive.h" - - + + +typedef struct { + ngx_uint_t size; + ngx_uint_t connections; @@ -63,15 +66,15 @@ index af4da73..99d073a 100644 + ngx_uint_t total_tries; + + int last_peer_state; - + - ngx_http_lua_srv_conf_t *conf; - ngx_http_request_t *request; + ngx_str_t cpool_name; - + - ngx_uint_t more_tries; - ngx_uint_t total_tries; + void *data; - + - struct sockaddr *sockaddr; - socklen_t socklen; + ngx_event_get_peer_pt original_get_peer; @@ -81,13 +84,13 @@ index af4da73..99d073a 100644 + ngx_event_set_peer_session_pt original_set_session; + ngx_event_save_peer_session_pt original_save_session; +#endif - + - ngx_str_t *host; - in_port_t port; + ngx_http_request_t *request; + ngx_http_lua_srv_conf_t *conf; + ngx_http_lua_balancer_keepalive_pool_t *cpool; - + - int last_peer_state; + ngx_str_t *host; + @@ -95,14 +98,14 @@ index af4da73..99d073a 100644 + socklen_t socklen; + + unsigned keepalive:1; - + #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) - unsigned cloned_upstream_conf; /* :1 */ + unsigned cloned_upstream_conf:1; #endif }; - - + + -#if (NGX_HTTP_SSL) -static ngx_int_t ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, - void *data); @@ -151,13 +154,13 @@ index af4da73..99d073a 100644 + +static char ngx_http_lua_balancer_keepalive_pools_table_key; +static struct sockaddr *ngx_http_lua_balancer_default_server_sockaddr; - - + + ngx_int_t @@ -102,6 +162,61 @@ ngx_http_lua_balancer_handler_inline(ngx_http_request_t *r, } - - + + +static ngx_int_t +ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) +{ @@ -236,9 +239,9 @@ index af4da73..99d073a 100644 ngx_http_upstream_srv_conf_t *uscf; + ngx_http_upstream_server_t *us; + ngx_http_lua_srv_conf_t *lscf = conf; - + dd("enter"); - + - /* must specify a content handler */ + /* content handler setup */ + @@ -246,13 +249,13 @@ index af4da73..99d073a 100644 return NGX_CONF_ERROR; } @@ -188,11 +305,42 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, - + lscf->balancer.src_key = cache_key; - + + /* balancer setup */ + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); - + + if (uscf->servers->nelts == 0) { + us = ngx_array_push(uscf->servers); + if (us == NULL) { @@ -286,11 +289,11 @@ index af4da73..99d073a 100644 + lscf->balancer.original_init_upstream = + ngx_http_upstream_init_round_robin; } - + uscf->peer.init_upstream = ngx_http_lua_balancer_init; @@ -208,14 +356,18 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, - - + + static ngx_int_t -ngx_http_lua_balancer_init(ngx_conf_t *cf, - ngx_http_upstream_srv_conf_t *us) @@ -304,12 +307,12 @@ index af4da73..99d073a 100644 + if (lscf->balancer.original_init_upstream(cf, us) != NGX_OK) { return NGX_ERROR; } - + - /* this callback is called upon individual requests */ + 
lscf->balancer.original_init_peer = us->peer.init; + us->peer.init = ngx_http_lua_balancer_init_peer; - + return NGX_OK; @@ -226,33 +378,39 @@ static ngx_int_t ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, @@ -318,7 +321,7 @@ index af4da73..99d073a 100644 - ngx_http_lua_srv_conf_t *bcf; + ngx_http_lua_srv_conf_t *lscf; ngx_http_lua_balancer_peer_data_t *bp; - + - bp = ngx_pcalloc(r->pool, sizeof(ngx_http_lua_balancer_peer_data_t)); - if (bp == NULL) { + lscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_lua_module); @@ -326,7 +329,7 @@ index af4da73..99d073a 100644 + if (lscf->balancer.original_init_peer(r, us) != NGX_OK) { return NGX_ERROR; } - + - r->upstream->peer.data = &bp->rrp; - - if (ngx_http_upstream_init_round_robin_peer(r, us) != NGX_OK) { @@ -334,7 +337,7 @@ index af4da73..99d073a 100644 + if (bp == NULL) { return NGX_ERROR; } - + + bp->conf = lscf; + bp->request = r; + bp->data = r->upstream->peer.data; @@ -345,7 +348,7 @@ index af4da73..99d073a 100644 r->upstream->peer.get = ngx_http_lua_balancer_get_peer; r->upstream->peer.free = ngx_http_lua_balancer_free_peer; + r->upstream->peer.notify = ngx_http_lua_balancer_notify_peer; - + #if (NGX_HTTP_SSL) + bp->original_set_session = r->upstream->peer.set_session; + bp->original_save_session = r->upstream->peer.save_session; @@ -353,7 +356,7 @@ index af4da73..99d073a 100644 r->upstream->peer.set_session = ngx_http_lua_balancer_set_session; r->upstream->peer.save_session = ngx_http_lua_balancer_save_session; #endif - + - bcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_lua_module); - - bp->conf = bcf; @@ -361,7 +364,7 @@ index af4da73..99d073a 100644 - return NGX_OK; } - + @@ -260,25 +418,26 @@ ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, static ngx_int_t ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) @@ -383,27 +386,27 @@ index af4da73..99d073a 100644 + ngx_http_lua_balancer_keepalive_item_t *item; + ngx_http_lua_balancer_peer_data_t *bp = data; + void *pdata; - + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, - "lua balancer peer, tries: %ui", pc->tries); - - lscf = bp->conf; + "lua balancer: get peer, tries: %ui", pc->tries); - + r = bp->request; + lscf = bp->conf; - + ngx_http_lua_assert(lscf->balancer.handler && r); - + ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); - if (ctx == NULL) { ctx = ngx_http_lua_create_ctx(r); if (ctx == NULL) { @@ -296,21 +455,23 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) - + ctx->context = NGX_HTTP_LUA_CONTEXT_BALANCER; - + + bp->cpool = NULL; bp->sockaddr = NULL; bp->socklen = 0; @@ -413,7 +416,7 @@ index af4da73..99d073a 100644 + bp->keepalive_timeout = 0; + bp->keepalive = 0; bp->total_tries++; - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - /* balancer_by_lua does not support yielding and @@ -423,9 +426,9 @@ index af4da73..99d073a 100644 - lmcf->balancer_peer_data = bp; + pdata = r->upstream->peer.data; + r->upstream->peer.data = bp; - + rc = lscf->balancer.handler(r, lscf, L); - + + r->upstream->peer.data = pdata; + if (rc == NGX_ERROR) { @@ -434,7 +437,7 @@ index af4da73..99d073a 100644 @@ -332,79 +493,88 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) } } - + - if (bp->sockaddr && bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { pc->sockaddr = bp->sockaddr; @@ -445,11 +448,11 @@ index af4da73..99d073a 100644 - pc->name = bp->host; - - bp->rrp.peers->single = 0; - + if (bp->more_tries) { r->upstream->peer.tries += bp->more_tries; } - + - dd("tries: %d", 
(int) r->upstream->peer.tries); - - return NGX_OK; @@ -461,7 +464,7 @@ index af4da73..99d073a 100644 + ngx_http_lua_balancer_get_keepalive_pool(L, pc->log, + &bp->cpool_name, + &bp->cpool); - + + if (bp->cpool == NULL + && ngx_http_lua_balancer_create_keepalive_pool(L, pc->log, + &bp->cpool_name, @@ -471,7 +474,7 @@ index af4da73..99d073a 100644 + { + return NGX_ERROR; + } - + -static ngx_int_t -ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) -{ @@ -479,18 +482,18 @@ index af4da73..99d073a 100644 - size_t len; - ngx_int_t rc; + ngx_http_lua_assert(bp->cpool); - + - /* init nginx context in Lua VM */ - ngx_http_lua_set_req(L, r); + if (!ngx_queue_empty(&bp->cpool->cache)) { + q = ngx_queue_head(&bp->cpool->cache); - + -#ifndef OPENRESTY_LUAJIT - ngx_http_lua_create_new_globals_table(L, 0 /* narr */, 1 /* nrec */); + item = ngx_queue_data(q, ngx_http_lua_balancer_keepalive_item_t, + queue); + c = item->connection; - + - /* {{{ make new env inheriting main thread's globals table */ - lua_createtable(L, 0, 1 /* nrec */); /* the metatable for the new env */ - ngx_http_lua_get_globals_table(L); @@ -499,7 +502,7 @@ index af4da73..99d073a 100644 - /* }}} */ + ngx_queue_remove(q); + ngx_queue_insert_head(&bp->cpool->free, q); - + - lua_setfenv(L, -2); /* set new running env for the code closure */ -#endif /* OPENRESTY_LUAJIT */ + c->idle = 0; @@ -508,33 +511,33 @@ index af4da73..99d073a 100644 + c->read->log = pc->log; + c->write->log = pc->log; + c->pool->log = pc->log; - + - lua_pushcfunction(L, ngx_http_lua_traceback); - lua_insert(L, 1); /* put it under chunk and args */ + if (c->read->timer_set) { + ngx_del_timer(c->read); + } - + - /* protected call user code */ - rc = lua_pcall(L, 0, 1, 1); + pc->cached = 1; + pc->connection = c; - + - lua_remove(L, 1); /* remove traceback function */ + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: keepalive reusing connection %p, " + "requests: %ui, cpool: %p", + c, c->requests, bp->cpool); - + - dd("rc == %d", (int) rc); + return NGX_DONE; + } - + - if (rc != 0) { - /* error occurred when running loaded code */ - err_msg = (u_char *) lua_tolstring(L, -1, &len); + bp->cpool->connections++; - + - if (err_msg == NULL) { - err_msg = (u_char *) "unknown reason"; - len = sizeof("unknown reason") - 1; @@ -542,12 +545,12 @@ index af4da73..99d073a 100644 + "lua balancer: keepalive no free connection, " + "cpool: %p", bp->cpool); } - + - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "failed to run balancer_by_lua*: %*s", len, err_msg); + return NGX_OK; + } - + - lua_settop(L, 0); /* clear remaining elems on stack */ + rc = bp->original_get_peer(pc, bp->data); + if (rc == NGX_ERROR) { @@ -557,14 +560,14 @@ index af4da73..99d073a 100644 + if (pc->sockaddr == ngx_http_lua_balancer_default_server_sockaddr) { + ngx_log_error(NGX_LOG_ERR, pc->log, 0, + "lua balancer: no peer set"); - + return NGX_ERROR; } - + - lua_settop(L, 0); /* clear remaining elems on stack */ return rc; } - + @@ -413,24 +583,364 @@ static void ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, ngx_uint_t state) @@ -576,22 +579,22 @@ index af4da73..99d073a 100644 + ngx_http_lua_balancer_keepalive_item_t *item; + ngx_http_lua_balancer_keepalive_pool_t *cpool; + ngx_http_lua_balancer_peer_data_t *bp = data; - + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, - "lua balancer free peer, tries: %ui", pc->tries); + "lua balancer: free peer, tries: %ui", pc->tries); + + u = bp->request->upstream; + c = pc->connection; - + - if (bp->sockaddr && 
bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { bp->last_peer_state = (int) state; - + if (pc->tries) { pc->tries--; } - + + if (ngx_http_lua_balancer_keepalive_is_enabled(bp)) { + cpool = bp->cpool; + @@ -709,7 +712,7 @@ index af4da73..99d073a 100644 +ngx_http_lua_balancer_notify_peer(ngx_peer_connection_t *pc, void *data, + ngx_uint_t type) +{ -+ if (type == NGX_HTTP_UPSTREAM_NOFITY_CACHED_CONNECTION_ERROR) { ++ if (type == NGX_HTTP_UPSTREAM_NOTIFY_CACHED_CONNECTION_ERROR) { + pc->tries--; + } +} @@ -835,14 +838,14 @@ index af4da73..99d073a 100644 + lua_pop(L, 1); /* orig stack */ return; } - + - /* fallback */ + ngx_http_lua_assert(lua_istable(L, -1)); + + lua_pushlstring(L, (const char *)cpool->cpool_name.data, cpool->cpool_name.len); + lua_pushnil(L); /* pools nil */ + lua_rawset(L, -3); /* pools */ - + - ngx_http_upstream_free_round_robin_peer(pc, data, state); + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, log, 0, + "lua balancer: keepalive free pool, " @@ -933,41 +936,41 @@ index af4da73..99d073a 100644 + ngx_http_lua_balancer_free_keepalive_pool(ev->log, item->cpool); + } } - - + + @@ -441,12 +951,12 @@ ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, void *data) { ngx_http_lua_balancer_peer_data_t *bp = data; - + - if (bp->sockaddr && bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { /* TODO */ return NGX_OK; } - + - return ngx_http_upstream_set_round_robin_peer_session(pc, &bp->rrp); + return bp->original_set_session(pc, bp->data); } - - + + @@ -455,13 +965,12 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) { ngx_http_lua_balancer_peer_data_t *bp = data; - + - if (bp->sockaddr && bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { /* TODO */ return; } - + - ngx_http_upstream_save_round_robin_peer_session(pc, &bp->rrp); - return; + bp->original_save_session(pc, bp->data); } - + #endif @@ -469,14 +978,14 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) - + int ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, - const u_char *addr, size_t addr_len, int port, char **err) @@ -985,13 +988,13 @@ index af4da73..99d073a 100644 + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; + ngx_http_lua_balancer_peer_data_t *bp; - + if (r == NULL) { *err = "no request found"; @@ -501,18 +1010,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, return NGX_ERROR; } - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - /* we cannot read r->upstream->peer.data here directly because @@ -1005,12 +1008,12 @@ index af4da73..99d073a 100644 - } - ngx_memzero(&url, sizeof(ngx_url_t)); - + url.url.data = ngx_palloc(r->pool, addr_len); @@ -536,6 +1033,8 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, return NGX_ERROR; } - + + bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; + if (url.addrs && url.addrs[0].sockaddr) { @@ -1019,7 +1022,7 @@ index af4da73..99d073a 100644 @@ -546,6 +1045,72 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, return NGX_ERROR; } - + + if (cpool_name_len == 0) { + bp->cpool_name = *bp->host; + @@ -1088,7 +1091,7 @@ index af4da73..99d073a 100644 + return NGX_OK; } - + @@ -555,14 +1120,13 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, long connect_timeout, long send_timeout, long read_timeout, char **err) @@ -1097,20 +1100,20 @@ index af4da73..99d073a 100644 - ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; + ngx_http_upstream_t *u; - + #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) 
ngx_http_upstream_conf_t *ucf; -#endif - ngx_http_lua_main_conf_t *lmcf; ngx_http_lua_balancer_peer_data_t *bp; +#endif - + if (r == NULL) { *err = "no request found"; @@ -587,15 +1151,9 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, return NGX_ERROR; } - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - bp = lmcf->balancer_peer_data; @@ -1139,12 +1142,12 @@ index af4da73..99d073a 100644 + ngx_http_lua_ctx_t *ctx; + ngx_http_upstream_t *u; ngx_http_lua_balancer_peer_data_t *bp; - + if (r == NULL) { @@ -681,13 +1237,7 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, return NGX_ERROR; } - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - bp = lmcf->balancer_peer_data; @@ -1153,7 +1156,7 @@ index af4da73..99d073a 100644 - return NGX_ERROR; - } + bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; - + #if (nginx_version >= 1007005) max_tries = r->upstream->conf->next_upstream_tries; @@ -713,12 +1263,10 @@ int @@ -1169,13 +1172,13 @@ index af4da73..99d073a 100644 + ngx_http_upstream_state_t *state; ngx_http_lua_balancer_peer_data_t *bp; - ngx_http_lua_main_conf_t *lmcf; - + if (r == NULL) { *err = "no request found"; @@ -743,13 +1291,7 @@ ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, return NGX_ERROR; } - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - bp = lmcf->balancer_peer_data; @@ -1184,7 +1187,7 @@ index af4da73..99d073a 100644 - return NGX_ERROR; - } + bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; - + if (r->upstream_states && r->upstream_states->nelts > 1) { state = r->upstream_states->elts; diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_common.h b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_common.h @@ -1194,7 +1197,7 @@ index 4c94629..bec484e 100644 @@ -258,13 +258,6 @@ struct ngx_http_lua_main_conf_s { ngx_str_t exit_worker_src; u_char *exit_worker_chunkname; - + - ngx_http_lua_balancer_peer_data_t *balancer_peer_data; - /* neither yielding nor recursion is possible in - * balancer_by_lua*, so there cannot be any races among @@ -1207,7 +1210,7 @@ index 4c94629..bec484e 100644 * body_filter_by_lua*, so there cannot be any races among @@ -359,6 +352,10 @@ union ngx_http_lua_srv_conf_u { } srv; - + struct { + ngx_http_upstream_init_pt original_init_upstream; + ngx_http_upstream_init_peer_pt original_init_peer; From 99a9aa2deb3cc4da315b906b592ea3b56366283c Mon Sep 17 00:00:00 2001 From: Datong Sun Date: Wed, 31 Jan 2024 21:49:47 +0800 Subject: [PATCH 10/91] chore(patches): revert the "respect max retries" patch (#12470) * chore(patches): revert the "respect max retries" patch We have discovered potential segfault risk with the feature and we do not have enough time to review this in more depth, therefore we have decided to revert the change temporarily to further investigate. This reverts PR #12346. 
FTI-5616 --- ...ua-0.10.26_01-dyn_upstream_keepalive.patch | 291 ++++++++---------- .../kong/balancer_respect_max_retries.yml | 3 - .../05-proxy/10-balancer/08-retries_spec.lua | 128 -------- 3 files changed, 125 insertions(+), 297 deletions(-) delete mode 100644 changelog/unreleased/kong/balancer_respect_max_retries.yml delete mode 100644 spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua diff --git a/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch b/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch index 4cbfa021505..293fb3609e7 100644 --- a/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch +++ b/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch @@ -1,39 +1,11 @@ -diff --git a/bundle/nginx-1.25.3/src/http/ngx_http_upstream.c b/bundle/nginx-1.25.3/src/http/ngx_http_upstream.c -index f364448..a3539e6 100644 ---- a/bundle/nginx-1.25.3/src/http/ngx_http_upstream.c -+++ b/bundle/nginx-1.25.3/src/http/ngx_http_upstream.c -@@ -4383,6 +4383,10 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u, - if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) { - /* TODO: inform balancer instead */ - u->peer.tries++; -+ if (u->peer.notify) { -+ u->peer.notify(&u->peer, u->peer.data, -+ NGX_HTTP_UPSTREAM_NOTIFY_CACHED_CONNECTION_ERROR); -+ } - } - - switch (ft_type) { -diff --git a/bundle/nginx-1.25.3/src/http/ngx_http_upstream.h b/bundle/nginx-1.25.3/src/http/ngx_http_upstream.h -index 15a35d9..51bad6b 100644 ---- a/bundle/nginx-1.25.3/src/http/ngx_http_upstream.h -+++ b/bundle/nginx-1.25.3/src/http/ngx_http_upstream.h -@@ -56,6 +56,8 @@ - #define NGX_HTTP_UPSTREAM_IGN_VARY 0x00000200 - - -+#define NGX_HTTP_UPSTREAM_NOTIFY_CACHED_CONNECTION_ERROR 0x1 -+ - typedef struct { - ngx_uint_t status; - ngx_msec_t response_time; diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_balancer.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_balancer.c -index af4da73..e10861c 100644 +index af4da733..407c115b 100644 --- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_balancer.c +++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_balancer.c -@@ -16,46 +16,106 @@ +@@ -16,46 +16,104 @@ #include "ngx_http_lua_directive.h" - - + + +typedef struct { + ngx_uint_t size; + ngx_uint_t connections; @@ -66,15 +38,15 @@ index af4da73..e10861c 100644 + ngx_uint_t total_tries; + + int last_peer_state; - + - ngx_http_lua_srv_conf_t *conf; - ngx_http_request_t *request; + ngx_str_t cpool_name; - + - ngx_uint_t more_tries; - ngx_uint_t total_tries; + void *data; - + - struct sockaddr *sockaddr; - socklen_t socklen; + ngx_event_get_peer_pt original_get_peer; @@ -84,13 +56,13 @@ index af4da73..e10861c 100644 + ngx_event_set_peer_session_pt original_set_session; + ngx_event_save_peer_session_pt original_save_session; +#endif - + - ngx_str_t *host; - in_port_t port; + ngx_http_request_t *request; + ngx_http_lua_srv_conf_t *conf; + ngx_http_lua_balancer_keepalive_pool_t *cpool; - + - int last_peer_state; + ngx_str_t *host; + @@ -98,14 +70,14 @@ index af4da73..e10861c 100644 + socklen_t socklen; + + unsigned keepalive:1; - + #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) - unsigned cloned_upstream_conf; /* :1 */ + unsigned cloned_upstream_conf:1; #endif }; - - + + -#if (NGX_HTTP_SSL) -static ngx_int_t ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, - void *data); @@ -124,8 +96,6 @@ index af4da73..e10861c 100644 - ngx_http_request_t *r); static void ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, 
ngx_uint_t state); -+static void ngx_http_lua_balancer_notify_peer(ngx_peer_connection_t *pc, -+ void *data, ngx_uint_t type); +static ngx_int_t ngx_http_lua_balancer_create_keepalive_pool(lua_State *L, + ngx_log_t *log, ngx_str_t *cpool_name, ngx_uint_t cpool_size, + ngx_http_lua_balancer_keepalive_pool_t **cpool); @@ -154,13 +124,13 @@ index af4da73..e10861c 100644 + +static char ngx_http_lua_balancer_keepalive_pools_table_key; +static struct sockaddr *ngx_http_lua_balancer_default_server_sockaddr; - - + + ngx_int_t -@@ -102,6 +162,61 @@ ngx_http_lua_balancer_handler_inline(ngx_http_request_t *r, +@@ -102,6 +160,61 @@ ngx_http_lua_balancer_handler_inline(ngx_http_request_t *r, } - - + + +static ngx_int_t +ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) +{ @@ -219,7 +189,7 @@ index af4da73..e10861c 100644 char * ngx_http_lua_balancer_by_lua_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) -@@ -125,18 +240,20 @@ char * +@@ -125,18 +238,20 @@ char * ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { @@ -239,23 +209,23 @@ index af4da73..e10861c 100644 ngx_http_upstream_srv_conf_t *uscf; + ngx_http_upstream_server_t *us; + ngx_http_lua_srv_conf_t *lscf = conf; - + dd("enter"); - + - /* must specify a content handler */ + /* content handler setup */ + if (cmd->post == NULL) { return NGX_CONF_ERROR; } -@@ -188,11 +305,42 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, - +@@ -188,11 +303,42 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + lscf->balancer.src_key = cache_key; - + + /* balancer setup */ + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); - + + if (uscf->servers->nelts == 0) { + us = ngx_array_push(uscf->servers); + if (us == NULL) { @@ -289,11 +259,11 @@ index af4da73..e10861c 100644 + lscf->balancer.original_init_upstream = + ngx_http_upstream_init_round_robin; } - + uscf->peer.init_upstream = ngx_http_lua_balancer_init; -@@ -208,14 +356,18 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, - - +@@ -208,14 +354,18 @@ ngx_http_lua_balancer_by_lua(ngx_conf_t *cf, ngx_command_t *cmd, + + static ngx_int_t -ngx_http_lua_balancer_init(ngx_conf_t *cf, - ngx_http_upstream_srv_conf_t *us) @@ -307,21 +277,21 @@ index af4da73..e10861c 100644 + if (lscf->balancer.original_init_upstream(cf, us) != NGX_OK) { return NGX_ERROR; } - + - /* this callback is called upon individual requests */ + lscf->balancer.original_init_peer = us->peer.init; + us->peer.init = ngx_http_lua_balancer_init_peer; - + return NGX_OK; -@@ -226,33 +378,39 @@ static ngx_int_t +@@ -226,33 +376,38 @@ static ngx_int_t ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, ngx_http_upstream_srv_conf_t *us) { - ngx_http_lua_srv_conf_t *bcf; + ngx_http_lua_srv_conf_t *lscf; ngx_http_lua_balancer_peer_data_t *bp; - + - bp = ngx_pcalloc(r->pool, sizeof(ngx_http_lua_balancer_peer_data_t)); - if (bp == NULL) { + lscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_lua_module); @@ -329,7 +299,7 @@ index af4da73..e10861c 100644 + if (lscf->balancer.original_init_peer(r, us) != NGX_OK) { return NGX_ERROR; } - + - r->upstream->peer.data = &bp->rrp; - - if (ngx_http_upstream_init_round_robin_peer(r, us) != NGX_OK) { @@ -337,7 +307,7 @@ index af4da73..e10861c 100644 + if (bp == NULL) { return NGX_ERROR; } - + + bp->conf = lscf; + bp->request = r; + bp->data = r->upstream->peer.data; @@ -347,8 +317,7 @@ index af4da73..e10861c 100644 + r->upstream->peer.data = bp; r->upstream->peer.get = 
ngx_http_lua_balancer_get_peer; r->upstream->peer.free = ngx_http_lua_balancer_free_peer; -+ r->upstream->peer.notify = ngx_http_lua_balancer_notify_peer; - + #if (NGX_HTTP_SSL) + bp->original_set_session = r->upstream->peer.set_session; + bp->original_save_session = r->upstream->peer.save_session; @@ -356,7 +325,7 @@ index af4da73..e10861c 100644 r->upstream->peer.set_session = ngx_http_lua_balancer_set_session; r->upstream->peer.save_session = ngx_http_lua_balancer_save_session; #endif - + - bcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_lua_module); - - bp->conf = bcf; @@ -364,8 +333,8 @@ index af4da73..e10861c 100644 - return NGX_OK; } - -@@ -260,25 +418,26 @@ ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, + +@@ -260,25 +415,26 @@ ngx_http_lua_balancer_init_peer(ngx_http_request_t *r, static ngx_int_t ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) { @@ -386,27 +355,27 @@ index af4da73..e10861c 100644 + ngx_http_lua_balancer_keepalive_item_t *item; + ngx_http_lua_balancer_peer_data_t *bp = data; + void *pdata; - + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, - "lua balancer peer, tries: %ui", pc->tries); - - lscf = bp->conf; + "lua balancer: get peer, tries: %ui", pc->tries); - + r = bp->request; + lscf = bp->conf; - + ngx_http_lua_assert(lscf->balancer.handler && r); - + ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module); - if (ctx == NULL) { ctx = ngx_http_lua_create_ctx(r); if (ctx == NULL) { -@@ -296,21 +455,23 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) - +@@ -296,21 +452,23 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) + ctx->context = NGX_HTTP_LUA_CONTEXT_BALANCER; - + + bp->cpool = NULL; bp->sockaddr = NULL; bp->socklen = 0; @@ -416,7 +385,7 @@ index af4da73..e10861c 100644 + bp->keepalive_timeout = 0; + bp->keepalive = 0; bp->total_tries++; - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - /* balancer_by_lua does not support yielding and @@ -426,18 +395,18 @@ index af4da73..e10861c 100644 - lmcf->balancer_peer_data = bp; + pdata = r->upstream->peer.data; + r->upstream->peer.data = bp; - + rc = lscf->balancer.handler(r, lscf, L); - + + r->upstream->peer.data = pdata; + if (rc == NGX_ERROR) { return NGX_ERROR; } -@@ -332,79 +493,88 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) +@@ -332,79 +490,88 @@ ngx_http_lua_balancer_get_peer(ngx_peer_connection_t *pc, void *data) } } - + - if (bp->sockaddr && bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { pc->sockaddr = bp->sockaddr; @@ -448,11 +417,11 @@ index af4da73..e10861c 100644 - pc->name = bp->host; - - bp->rrp.peers->single = 0; - + if (bp->more_tries) { r->upstream->peer.tries += bp->more_tries; } - + - dd("tries: %d", (int) r->upstream->peer.tries); - - return NGX_OK; @@ -464,7 +433,7 @@ index af4da73..e10861c 100644 + ngx_http_lua_balancer_get_keepalive_pool(L, pc->log, + &bp->cpool_name, + &bp->cpool); - + + if (bp->cpool == NULL + && ngx_http_lua_balancer_create_keepalive_pool(L, pc->log, + &bp->cpool_name, @@ -474,7 +443,7 @@ index af4da73..e10861c 100644 + { + return NGX_ERROR; + } - + -static ngx_int_t -ngx_http_lua_balancer_by_chunk(lua_State *L, ngx_http_request_t *r) -{ @@ -482,18 +451,18 @@ index af4da73..e10861c 100644 - size_t len; - ngx_int_t rc; + ngx_http_lua_assert(bp->cpool); - + - /* init nginx context in Lua VM */ - ngx_http_lua_set_req(L, r); + if (!ngx_queue_empty(&bp->cpool->cache)) { + q = ngx_queue_head(&bp->cpool->cache); - + -#ifndef OPENRESTY_LUAJIT 
- ngx_http_lua_create_new_globals_table(L, 0 /* narr */, 1 /* nrec */); + item = ngx_queue_data(q, ngx_http_lua_balancer_keepalive_item_t, + queue); + c = item->connection; - + - /* {{{ make new env inheriting main thread's globals table */ - lua_createtable(L, 0, 1 /* nrec */); /* the metatable for the new env */ - ngx_http_lua_get_globals_table(L); @@ -502,7 +471,7 @@ index af4da73..e10861c 100644 - /* }}} */ + ngx_queue_remove(q); + ngx_queue_insert_head(&bp->cpool->free, q); - + - lua_setfenv(L, -2); /* set new running env for the code closure */ -#endif /* OPENRESTY_LUAJIT */ + c->idle = 0; @@ -511,33 +480,33 @@ index af4da73..e10861c 100644 + c->read->log = pc->log; + c->write->log = pc->log; + c->pool->log = pc->log; - + - lua_pushcfunction(L, ngx_http_lua_traceback); - lua_insert(L, 1); /* put it under chunk and args */ + if (c->read->timer_set) { + ngx_del_timer(c->read); + } - + - /* protected call user code */ - rc = lua_pcall(L, 0, 1, 1); + pc->cached = 1; + pc->connection = c; - + - lua_remove(L, 1); /* remove traceback function */ + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "lua balancer: keepalive reusing connection %p, " + "requests: %ui, cpool: %p", + c, c->requests, bp->cpool); - + - dd("rc == %d", (int) rc); + return NGX_DONE; + } - + - if (rc != 0) { - /* error occurred when running loaded code */ - err_msg = (u_char *) lua_tolstring(L, -1, &len); + bp->cpool->connections++; - + - if (err_msg == NULL) { - err_msg = (u_char *) "unknown reason"; - len = sizeof("unknown reason") - 1; @@ -545,12 +514,12 @@ index af4da73..e10861c 100644 + "lua balancer: keepalive no free connection, " + "cpool: %p", bp->cpool); } - + - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "failed to run balancer_by_lua*: %*s", len, err_msg); + return NGX_OK; + } - + - lua_settop(L, 0); /* clear remaining elems on stack */ + rc = bp->original_get_peer(pc, bp->data); + if (rc == NGX_ERROR) { @@ -560,15 +529,15 @@ index af4da73..e10861c 100644 + if (pc->sockaddr == ngx_http_lua_balancer_default_server_sockaddr) { + ngx_log_error(NGX_LOG_ERR, pc->log, 0, + "lua balancer: no peer set"); - + return NGX_ERROR; } - + - lua_settop(L, 0); /* clear remaining elems on stack */ return rc; } - -@@ -413,24 +583,364 @@ static void + +@@ -413,24 +580,354 @@ static void ngx_http_lua_balancer_free_peer(ngx_peer_connection_t *pc, void *data, ngx_uint_t state) { @@ -579,22 +548,22 @@ index af4da73..e10861c 100644 + ngx_http_lua_balancer_keepalive_item_t *item; + ngx_http_lua_balancer_keepalive_pool_t *cpool; + ngx_http_lua_balancer_peer_data_t *bp = data; - + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, - "lua balancer free peer, tries: %ui", pc->tries); + "lua balancer: free peer, tries: %ui", pc->tries); + + u = bp->request->upstream; + c = pc->connection; - + - if (bp->sockaddr && bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { bp->last_peer_state = (int) state; - + if (pc->tries) { pc->tries--; } - + + if (ngx_http_lua_balancer_keepalive_is_enabled(bp)) { + cpool = bp->cpool; + @@ -708,16 +677,6 @@ index af4da73..e10861c 100644 +} + + -+static void -+ngx_http_lua_balancer_notify_peer(ngx_peer_connection_t *pc, void *data, -+ ngx_uint_t type) -+{ -+ if (type == NGX_HTTP_UPSTREAM_NOTIFY_CACHED_CONNECTION_ERROR) { -+ pc->tries--; -+ } -+} -+ -+ +static ngx_int_t +ngx_http_lua_balancer_create_keepalive_pool(lua_State *L, ngx_log_t *log, + ngx_str_t *cpool_name, ngx_uint_t cpool_size, @@ -836,17 +795,15 @@ index af4da73..e10861c 100644 + + if (lua_isnil(L, -1)) { + lua_pop(L, 1); /* orig stack 
*/ - return; - } - -- /* fallback */ ++ return; ++ } ++ + ngx_http_lua_assert(lua_istable(L, -1)); + + lua_pushlstring(L, (const char *)cpool->cpool_name.data, cpool->cpool_name.len); + lua_pushnil(L); /* pools nil */ + lua_rawset(L, -3); /* pools */ - -- ngx_http_upstream_free_round_robin_peer(pc, data, state); ++ + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, log, 0, + "lua balancer: keepalive free pool, " + "name: %V, cpool: %p", @@ -919,14 +876,16 @@ index af4da73..e10861c 100644 + goto close; + } + -+ return; -+ } -+ + return; + } + +- /* fallback */ +close: + + item = c->data; + c->log = ev->log; -+ + +- ngx_http_upstream_free_round_robin_peer(pc, data, state); + ngx_http_lua_balancer_close(c); + + ngx_queue_remove(&item->queue); @@ -936,41 +895,41 @@ index af4da73..e10861c 100644 + ngx_http_lua_balancer_free_keepalive_pool(ev->log, item->cpool); + } } - - -@@ -441,12 +951,12 @@ ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, void *data) + + +@@ -441,12 +938,12 @@ ngx_http_lua_balancer_set_session(ngx_peer_connection_t *pc, void *data) { ngx_http_lua_balancer_peer_data_t *bp = data; - + - if (bp->sockaddr && bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { /* TODO */ return NGX_OK; } - + - return ngx_http_upstream_set_round_robin_peer_session(pc, &bp->rrp); + return bp->original_set_session(pc, bp->data); } - - -@@ -455,13 +965,12 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) + + +@@ -455,13 +952,12 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) { ngx_http_lua_balancer_peer_data_t *bp = data; - + - if (bp->sockaddr && bp->socklen) { + if (ngx_http_lua_balancer_peer_set(bp)) { /* TODO */ return; } - + - ngx_http_upstream_save_round_robin_peer_session(pc, &bp->rrp); - return; + bp->original_save_session(pc, bp->data); } - + #endif -@@ -469,14 +978,14 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) - +@@ -469,14 +965,14 @@ ngx_http_lua_balancer_save_session(ngx_peer_connection_t *pc, void *data) + int ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, - const u_char *addr, size_t addr_len, int port, char **err) @@ -988,13 +947,13 @@ index af4da73..e10861c 100644 + ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; + ngx_http_lua_balancer_peer_data_t *bp; - + if (r == NULL) { *err = "no request found"; -@@ -501,18 +1010,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, +@@ -501,18 +997,6 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, return NGX_ERROR; } - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - /* we cannot read r->upstream->peer.data here directly because @@ -1008,21 +967,21 @@ index af4da73..e10861c 100644 - } - ngx_memzero(&url, sizeof(ngx_url_t)); - + url.url.data = ngx_palloc(r->pool, addr_len); -@@ -536,6 +1033,8 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, +@@ -536,6 +1020,8 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, return NGX_ERROR; } - + + bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; + if (url.addrs && url.addrs[0].sockaddr) { bp->sockaddr = url.addrs[0].sockaddr; bp->socklen = url.addrs[0].socklen; -@@ -546,6 +1045,72 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, +@@ -546,6 +1032,72 @@ ngx_http_lua_ffi_balancer_set_current_peer(ngx_http_request_t *r, return NGX_ERROR; } - + + if (cpool_name_len == 0) { + bp->cpool_name = *bp->host; + @@ -1091,8 +1050,8 @@ index af4da73..e10861c 100644 + return NGX_OK; } - 
-@@ -555,14 +1120,13 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, + +@@ -555,14 +1107,13 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, long connect_timeout, long send_timeout, long read_timeout, char **err) { @@ -1100,20 +1059,20 @@ index af4da73..e10861c 100644 - ngx_http_upstream_t *u; + ngx_http_lua_ctx_t *ctx; + ngx_http_upstream_t *u; - + #if !(HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS) ngx_http_upstream_conf_t *ucf; -#endif - ngx_http_lua_main_conf_t *lmcf; ngx_http_lua_balancer_peer_data_t *bp; +#endif - + if (r == NULL) { *err = "no request found"; -@@ -587,15 +1151,9 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, +@@ -587,15 +1138,9 @@ ngx_http_lua_ffi_balancer_set_timeouts(ngx_http_request_t *r, return NGX_ERROR; } - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - bp = lmcf->balancer_peer_data; @@ -1128,7 +1087,7 @@ index af4da73..e10861c 100644 if (!bp->cloned_upstream_conf) { /* we clone the upstream conf for the current request so that * we do not affect other requests at all. */ -@@ -650,12 +1208,10 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, +@@ -650,12 +1195,10 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, int count, char **err) { #if (nginx_version >= 1007005) @@ -1142,12 +1101,12 @@ index af4da73..e10861c 100644 + ngx_http_lua_ctx_t *ctx; + ngx_http_upstream_t *u; ngx_http_lua_balancer_peer_data_t *bp; - + if (r == NULL) { -@@ -681,13 +1237,7 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, +@@ -681,13 +1224,7 @@ ngx_http_lua_ffi_balancer_set_more_tries(ngx_http_request_t *r, return NGX_ERROR; } - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - bp = lmcf->balancer_peer_data; @@ -1156,10 +1115,10 @@ index af4da73..e10861c 100644 - return NGX_ERROR; - } + bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; - + #if (nginx_version >= 1007005) max_tries = r->upstream->conf->next_upstream_tries; -@@ -713,12 +1263,10 @@ int +@@ -713,12 +1250,10 @@ int ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, int *status, char **err) { @@ -1172,13 +1131,13 @@ index af4da73..e10861c 100644 + ngx_http_upstream_state_t *state; ngx_http_lua_balancer_peer_data_t *bp; - ngx_http_lua_main_conf_t *lmcf; - + if (r == NULL) { *err = "no request found"; -@@ -743,13 +1291,7 @@ ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, +@@ -743,13 +1278,7 @@ ngx_http_lua_ffi_balancer_get_last_failure(ngx_http_request_t *r, return NGX_ERROR; } - + - lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module); - - bp = lmcf->balancer_peer_data; @@ -1187,17 +1146,17 @@ index af4da73..e10861c 100644 - return NGX_ERROR; - } + bp = (ngx_http_lua_balancer_peer_data_t *) u->peer.data; - + if (r->upstream_states && r->upstream_states->nelts > 1) { state = r->upstream_states->elts; diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_common.h b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_common.h -index 4c94629..bec484e 100644 +index 4c946297..bec484e1 100644 --- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_common.h +++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_common.h @@ -258,13 +258,6 @@ struct ngx_http_lua_main_conf_s { ngx_str_t exit_worker_src; u_char *exit_worker_chunkname; - + - ngx_http_lua_balancer_peer_data_t *balancer_peer_data; - /* neither yielding nor recursion is possible in - * balancer_by_lua*, so there cannot be any races among @@ -1210,7 +1169,7 @@ index 4c94629..bec484e 100644 * body_filter_by_lua*, so there cannot 
be any races among @@ -359,6 +352,10 @@ union ngx_http_lua_srv_conf_u { } srv; - + struct { + ngx_http_upstream_init_pt original_init_upstream; + ngx_http_upstream_init_peer_pt original_init_peer; @@ -1220,7 +1179,7 @@ index 4c94629..bec484e 100644 ngx_str_t src; u_char *src_key; diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_module.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_module.c -index fb10bf9..c2f085b 100644 +index fb10bf93..c2f085be 100644 --- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_module.c +++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_module.c @@ -1188,6 +1188,9 @@ ngx_http_lua_create_srv_conf(ngx_conf_t *cf) diff --git a/changelog/unreleased/kong/balancer_respect_max_retries.yml b/changelog/unreleased/kong/balancer_respect_max_retries.yml deleted file mode 100644 index 1884ad1ce9f..00000000000 --- a/changelog/unreleased/kong/balancer_respect_max_retries.yml +++ /dev/null @@ -1,3 +0,0 @@ -message: Fix an issue that the actual number of retry times exceeds the `retries` setting. -type: bugfix -scope: Core diff --git a/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua b/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua deleted file mode 100644 index b3245055dfe..00000000000 --- a/spec/02-integration/05-proxy/10-balancer/08-retries_spec.lua +++ /dev/null @@ -1,128 +0,0 @@ -local helpers = require "spec.helpers" -local cjson = require "cjson" - -local function get_log(typ, n) - local entries - helpers.wait_until(function() - local client = assert(helpers.http_client(helpers.mock_upstream_host, - helpers.mock_upstream_port)) - local res = client:get("/read_log/" .. typ, { - headers = { - Accept = "application/json" - } - }) - local raw = assert.res_status(200, res) - local body = cjson.decode(raw) - - entries = body.entries - return #entries > 0 - end, 10) - if n then - assert(#entries == n, "expected " .. n .. " log entries, but got " .. #entries) - end - return entries -end - -for _, strategy in helpers.each_strategy() do - describe("Balancer: respect max retries [#" .. strategy .. "]", function() - local service - - lazy_setup(function() - local bp = helpers.get_db_utils(strategy, { - "routes", - "services", - "plugins", - }) - - service = bp.services:insert { - name = "retry_service", - host = "127.0.0.1", - port = 62351, - retries = 5, - } - - local route = bp.routes:insert { - service = service, - paths = { "/hello" }, - strip_path = false, - } - - bp.plugins:insert { - route = { id = route.id }, - name = "http-log", - config = { - queue = { - max_batch_size = 1, - max_coalescing_delay = 0.1, - }, - http_endpoint = "http://" .. helpers.mock_upstream_host - .. ":" - .. helpers.mock_upstream_port - .. 
"/post_log/http" - } - } - - local fixtures = { - http_mock = {} - } - - fixtures.http_mock.my_server_block = [[ - server { - listen 0.0.0.0:62351; - location /hello { - content_by_lua_block { - local request_counter = ngx.shared.request_counter - local first_request = request_counter:get("first_request") - if first_request == nil then - request_counter:set("first_request", "yes") - ngx.say("hello") - else - ngx.exit(ngx.HTTP_CLOSE) - end - } - } - } - ]] - - assert(helpers.start_kong({ - database = strategy, - nginx_conf = "spec/fixtures/custom_nginx.template", - nginx_http_lua_shared_dict = "request_counter 1m", - }, nil, nil, fixtures)) - end) - - lazy_teardown(function() - helpers.stop_kong() - end) - - it("exceeded limit", function() - -- First request should succeed and save connection to upstream in keepalive pool - local proxy_client1 = helpers.proxy_client() - local res = assert(proxy_client1:send { - method = "GET", - path = "/hello", - }) - - assert.res_status(200, res) - - proxy_client1:close() - - -- Second request should failed 1 times and retry 5 times and then return 502 - local proxy_client2 = helpers.proxy_client() - - res = assert(proxy_client2:send { - method = "GET", - path = "/hello", - }) - - assert.res_status(502, res) - - -- wait for the http-log plugin to flush the log - ngx.sleep(1) - - local entries = get_log("http", 2) - assert.equal(#entries[2].tries, 6) - assert.equal(entries[2].upstream_status, "502, 502, 502, 502, 502, 502") - end) - end) -end From 9a101a6a909be454fc41a86f089045b9981d9c43 Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Thu, 1 Feb 2024 14:26:04 +0200 Subject: [PATCH 11/91] hotfix(deps): bump openssl from 3.2.0 to 3.2.1 (#12482) ### Summary See: https://www.openssl.org/news/cl32.txt Signed-off-by: Aapo Talvensaari --- .requirements | 2 +- build/openresty/openssl/openssl_repositories.bzl | 2 +- changelog/unreleased/kong/bump-openssl.yml | 2 +- scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt | 2 +- scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt | 2 +- scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt | 2 +- scripts/explain_manifest/fixtures/debian-10-amd64.txt | 2 +- scripts/explain_manifest/fixtures/debian-11-amd64.txt | 2 +- scripts/explain_manifest/fixtures/debian-12-amd64.txt | 2 +- scripts/explain_manifest/fixtures/el7-amd64.txt | 2 +- scripts/explain_manifest/fixtures/el8-amd64.txt | 2 +- scripts/explain_manifest/fixtures/el9-amd64.txt | 2 +- scripts/explain_manifest/fixtures/el9-arm64.txt | 2 +- scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt | 2 +- scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt | 2 +- scripts/explain_manifest/fixtures/ubuntu-22.04-arm64.txt | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.requirements b/.requirements index 8e687f97a79..db51855b150 100644 --- a/.requirements +++ b/.requirements @@ -2,7 +2,7 @@ KONG_PACKAGE_NAME=kong OPENRESTY=1.25.3.1 LUAROCKS=3.9.2 -OPENSSL=3.2.0 +OPENSSL=3.2.1 PCRE=10.42 LIBEXPAT=2.5.0 diff --git a/build/openresty/openssl/openssl_repositories.bzl b/build/openresty/openssl/openssl_repositories.bzl index f06c848fc92..8d80947d3ea 100644 --- a/build/openresty/openssl/openssl_repositories.bzl +++ b/build/openresty/openssl/openssl_repositories.bzl @@ -11,7 +11,7 @@ def openssl_repositories(): http_archive, name = "openssl", build_file = "//build/openresty/openssl:BUILD.bazel", - sha256 = "14c826f07c7e433706fb5c69fa9e25dab95684844b4c962a2cf1bf183eb4690e", + sha256 = 
"83c7329fe52c850677d75e5d0b0ca245309b97e8ecbcfdc1dfdc4ab9fac35b39", strip_prefix = "openssl-" + version, urls = [ "https://www.openssl.org/source/openssl-" + version + ".tar.gz", diff --git a/changelog/unreleased/kong/bump-openssl.yml b/changelog/unreleased/kong/bump-openssl.yml index 687f0c70200..75c3e6129f1 100644 --- a/changelog/unreleased/kong/bump-openssl.yml +++ b/changelog/unreleased/kong/bump-openssl.yml @@ -1,3 +1,3 @@ -message: Bumped OpenSSL from 3.1.4 to 3.2.0 +message: Bumped OpenSSL from 3.1.4 to 3.2.1 type: dependency scope: Core diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt index 34190b2b924..9c1876426ff 100644 --- a/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt +++ b/scripts/explain_manifest/fixtures/amazonlinux-2-amd64.txt @@ -203,7 +203,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt index b67b46ffebb..1767598eebb 100644 --- a/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt +++ b/scripts/explain_manifest/fixtures/amazonlinux-2023-amd64.txt @@ -189,7 +189,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt b/scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt index 48576d505f1..320540e5c77 100644 --- a/scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt +++ b/scripts/explain_manifest/fixtures/amazonlinux-2023-arm64.txt @@ -170,7 +170,7 @@ - lua-resty-events - lua-resty-lmdb - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/debian-10-amd64.txt b/scripts/explain_manifest/fixtures/debian-10-amd64.txt index d79c02cde0f..3ee40f75e36 100644 --- a/scripts/explain_manifest/fixtures/debian-10-amd64.txt +++ b/scripts/explain_manifest/fixtures/debian-10-amd64.txt @@ -203,7 +203,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/debian-11-amd64.txt b/scripts/explain_manifest/fixtures/debian-11-amd64.txt index 6b2c8a6327a..4387961f6e5 100644 --- a/scripts/explain_manifest/fixtures/debian-11-amd64.txt +++ b/scripts/explain_manifest/fixtures/debian-11-amd64.txt @@ -192,7 +192,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/debian-12-amd64.txt b/scripts/explain_manifest/fixtures/debian-12-amd64.txt index 1db2a407276..e47f94f75ff 100644 --- a/scripts/explain_manifest/fixtures/debian-12-amd64.txt +++ b/scripts/explain_manifest/fixtures/debian-12-amd64.txt @@ -179,7 +179,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related 
DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/el7-amd64.txt b/scripts/explain_manifest/fixtures/el7-amd64.txt index b0d0b772ff0..d64e3806398 100644 --- a/scripts/explain_manifest/fixtures/el7-amd64.txt +++ b/scripts/explain_manifest/fixtures/el7-amd64.txt @@ -202,7 +202,7 @@ - lua-resty-events - lua-resty-lmdb - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/el8-amd64.txt b/scripts/explain_manifest/fixtures/el8-amd64.txt index c0e493082a4..32b4666f539 100644 --- a/scripts/explain_manifest/fixtures/el8-amd64.txt +++ b/scripts/explain_manifest/fixtures/el8-amd64.txt @@ -202,7 +202,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/el9-amd64.txt b/scripts/explain_manifest/fixtures/el9-amd64.txt index 87ddaec8f70..e6bc2c9d3b6 100644 --- a/scripts/explain_manifest/fixtures/el9-amd64.txt +++ b/scripts/explain_manifest/fixtures/el9-amd64.txt @@ -189,7 +189,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/el9-arm64.txt b/scripts/explain_manifest/fixtures/el9-arm64.txt index 48576d505f1..320540e5c77 100644 --- a/scripts/explain_manifest/fixtures/el9-arm64.txt +++ b/scripts/explain_manifest/fixtures/el9-arm64.txt @@ -170,7 +170,7 @@ - lua-resty-events - lua-resty-lmdb - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt b/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt index 854c2289e38..34cad3f9fdf 100644 --- a/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt +++ b/scripts/explain_manifest/fixtures/ubuntu-20.04-amd64.txt @@ -196,6 +196,6 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt b/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt index 8c96980a475..0c565edc151 100644 --- a/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt +++ b/scripts/explain_manifest/fixtures/ubuntu-22.04-amd64.txt @@ -183,7 +183,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True diff --git a/scripts/explain_manifest/fixtures/ubuntu-22.04-arm64.txt b/scripts/explain_manifest/fixtures/ubuntu-22.04-arm64.txt index da9623d15a0..5ba824549c9 100644 --- a/scripts/explain_manifest/fixtures/ubuntu-22.04-arm64.txt +++ b/scripts/explain_manifest/fixtures/ubuntu-22.04-arm64.txt @@ -181,7 +181,7 @@ - lua-resty-lmdb - ngx_brotli - ngx_wasm_module - OpenSSL : OpenSSL 3.2.0 23 Nov 2023 + OpenSSL : OpenSSL 3.2.1 30 Jan 2024 DWARF : True DWARF - ngx_http_request_t related DWARF DIEs: True From c7cb900f4919e6b320b7bdf9132cfa4747679760 Mon Sep 17 00:00:00 2001 From: Samuele Date: Thu, 1 Feb 2024 19:21:57 +0100 Subject: [PATCH 12/91] 
perf(opentelemetry): increase max batch size (#12488) The max batch size for Opentelemetry was set to the default value: 1 the value actually refers to the number of spans in a batch, so we are increasing the default value to 200 which corresponds to what the default value used to be with the "old" queue implementation. --- .../unreleased/kong/otel-increase-queue-max-batch-size.yml | 3 +++ kong/plugins/opentelemetry/schema.lua | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/kong/otel-increase-queue-max-batch-size.yml diff --git a/changelog/unreleased/kong/otel-increase-queue-max-batch-size.yml b/changelog/unreleased/kong/otel-increase-queue-max-batch-size.yml new file mode 100644 index 00000000000..6936adcf761 --- /dev/null +++ b/changelog/unreleased/kong/otel-increase-queue-max-batch-size.yml @@ -0,0 +1,3 @@ +message: "**Opentelemetry**: increase queue max batch size to 200" +type: performance +scope: Plugin diff --git a/kong/plugins/opentelemetry/schema.lua b/kong/plugins/opentelemetry/schema.lua index 4601703163d..85d8f4c1834 100644 --- a/kong/plugins/opentelemetry/schema.lua +++ b/kong/plugins/opentelemetry/schema.lua @@ -45,7 +45,11 @@ return { }, } }, { resource_attributes = resource_attributes }, - { queue = typedefs.queue }, + { queue = typedefs.queue { + default = { + max_batch_size = 200, + }, + } }, { batch_span_count = { description = "The number of spans to be sent in a single batch.", type = "integer" } }, { batch_flush_delay = { description = "The delay, in seconds, between two consecutive batches.", type = "integer" } }, { connect_timeout = typedefs.timeout { default = 1000 } }, From 00211cbe328d10831b0940b21e6d35ed0b880268 Mon Sep 17 00:00:00 2001 From: Jack Tysoe <91137069+tysoekong@users.noreply.github.com> Date: Thu, 1 Feb 2024 21:32:04 +0000 Subject: [PATCH 13/91] feat(ai-proxy): add telemetry for ai-proxy (#12492) --- .../kong/add-ai-proxy-telemetry.yml | 3 +++ kong/api/routes/plugins.lua | 21 +++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 changelog/unreleased/kong/add-ai-proxy-telemetry.yml diff --git a/changelog/unreleased/kong/add-ai-proxy-telemetry.yml b/changelog/unreleased/kong/add-ai-proxy-telemetry.yml new file mode 100644 index 00000000000..829bb8e4958 --- /dev/null +++ b/changelog/unreleased/kong/add-ai-proxy-telemetry.yml @@ -0,0 +1,3 @@ +message: Adds telemetry collection for AI Proxy, AI Request Transformer, and AI Response Transformer, pertaining to model and provider usage. 
+type: feature +scope: Core diff --git a/kong/api/routes/plugins.lua b/kong/api/routes/plugins.lua index 0336e85eac4..bf8be078b07 100644 --- a/kong/api/routes/plugins.lua +++ b/kong/api/routes/plugins.lua @@ -38,6 +38,27 @@ local function reports_timer(premature, data) r_data.e = "c" end + if data.name == "ai-proxy" then + r_data.config = { + llm = { + model = {} + } + } + + r_data.config.llm.model.name = data.config.model.name + r_data.config.llm.model.provider = data.config.model.provider + + elseif data.name == "ai-request-transformer" or data.name == "ai-response-transformer" then + r_data.config = { + llm = { + model = {} + } + } + + r_data.config.llm.model.name = data.config.llm.model.name + r_data.config.llm.model.provider = data.config.llm.model.provider + end + reports.send("api", r_data) end From 1fb8be5a52c0adfeceae89e2056128734ccfc489 Mon Sep 17 00:00:00 2001 From: Jack Tysoe <91137069+tysoekong@users.noreply.github.com> Date: Thu, 1 Feb 2024 21:34:50 +0000 Subject: [PATCH 14/91] fix(ai-proxy): double-gzipping responses when status is not 200 (#12493) --- kong/plugins/ai-proxy/handler.lua | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/kong/plugins/ai-proxy/handler.lua b/kong/plugins/ai-proxy/handler.lua index 0a824395ac1..89242ffc448 100644 --- a/kong/plugins/ai-proxy/handler.lua +++ b/kong/plugins/ai-proxy/handler.lua @@ -43,6 +43,8 @@ function _M:header_filter(conf) local new_response_string, err = ai_driver.from_format(response_body, conf.model, route_type) if err then + kong.ctx.plugin.ai_parser_error = true + ngx.status = 500 local message = { error = { @@ -66,21 +68,24 @@ end function _M:body_filter(conf) if not kong.ctx.shared.skip_response_transformer then - -- all errors MUST be checked and returned in header_filter - -- we should receive a replacement response body from the same thread - - local original_request = kong.ctx.plugin.parsed_response or kong.response.get_raw_body() - local deflated_request = kong.ctx.plugin.parsed_response or kong.response.get_raw_body() - if deflated_request then - local is_gzip = kong.response.get_header("Content-Encoding") == "gzip" - if is_gzip then - deflated_request = kong_utils.deflate_gzip(deflated_request) + if (kong.response.get_status() == 200) or (kong.ctx.plugin.ai_parser_error) then + -- all errors MUST be checked and returned in header_filter + -- we should receive a replacement response body from the same thread + + local original_request = kong.ctx.plugin.parsed_response + local deflated_request = kong.ctx.plugin.parsed_response + if deflated_request then + local is_gzip = kong.response.get_header("Content-Encoding") == "gzip" + if is_gzip then + deflated_request = kong_utils.deflate_gzip(deflated_request) + end + + kong.response.set_raw_body(deflated_request) end - kong.response.set_raw_body(deflated_request) - end - -- call with replacement body, or original body if nothing changed - ai_shared.post_request(conf, original_request) + -- call with replacement body, or original body if nothing changed + ai_shared.post_request(conf, original_request) + end end end From dd257675dc95216333dc625578bbe9bf8ca6d397 Mon Sep 17 00:00:00 2001 From: Xumin <100666470+StarlightIbuki@users.noreply.github.com> Date: Fri, 2 Feb 2024 03:18:02 +0000 Subject: [PATCH 15/91] chore(tests): sync slightly different comments of the http_mock (#12399) --- spec/helpers/http_mock.lua | 17 ++++++++++++----- spec/helpers/http_mock/asserts.lua | 4 +--- spec/helpers/http_mock/debug_port.lua | 3 +-- 
spec/helpers/http_mock/template.lua | 2 +- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/spec/helpers/http_mock.lua b/spec/helpers/http_mock.lua index 7d54aac55ed..229043b436a 100644 --- a/spec/helpers/http_mock.lua +++ b/spec/helpers/http_mock.lua @@ -118,8 +118,15 @@ end -- client:send({}) -- local logs = mock:retrieve_mocking_logs() -- get all the logs of HTTP sessions -- mock:stop() --- @usage --- -- routes can be a table like this: +-- +-- listens can be a number, which will be used as the port of the mock server; +-- or a string, which will be used as the param of listen directive of the mock server; +-- or a table represents multiple listen ports. +-- if the port is not specified, a random port will be used. +-- call mock:get_default_port() to get the first port the mock server listens to. +-- if the port is a number and opts.tls is set to ture, ssl will be appended. +-- +-- routes can be a table like this: -- routes = { -- ["/"] = { -- access = [[ @@ -234,11 +241,11 @@ end --- make assertions on HTTP requests. -- with a timeout to wait for the requests to arrive --- @class http_mock.eventually +-- @table http_mock.eventually --- assert if the condition is true for one of the logs. --- Replace "session" in the name of the function to assert on fields of the log. --- The field can be one of "session", "request", "response", "error". +--- Replace "session" in the name of the function to assert on fields of the log. +--- The field can be one of "session", "request", "response", "error". -- @function http_mock.eventually:has_session_satisfy -- @tparam function check the check function, accept a log and throw error if the condition is not satisfied diff --git a/spec/helpers/http_mock/asserts.lua b/spec/helpers/http_mock/asserts.lua index 8d3705c90b5..08664c65d49 100644 --- a/spec/helpers/http_mock/asserts.lua +++ b/spec/helpers/http_mock/asserts.lua @@ -4,12 +4,10 @@ local pairs = pairs local pcall = pcall local error = error ----@class http_mock local http_mock = {} local build_in_checks = {} ----@class http_mock_asserts local eventually_MT = {} eventually_MT.__index = eventually_MT @@ -147,7 +145,7 @@ end -- a session means a request/response pair. 
-- The impl callback throws error if the assertion is not true -- and returns a string to tell what condition is satisfied --- This design is to allow the user to use lua asserts in the callback +-- This design is to allow the user to use lua asserts in the callback -- (or even callback the registered assertion accept as argument), like the example; -- and for has_no/not_all assertions, we can construct an error message for it like: -- "we don't expect that: has header foo" diff --git a/spec/helpers/http_mock/debug_port.lua b/spec/helpers/http_mock/debug_port.lua index e5db9e5327f..89fe65d915f 100644 --- a/spec/helpers/http_mock/debug_port.lua +++ b/spec/helpers/http_mock/debug_port.lua @@ -6,7 +6,6 @@ local ipairs = ipairs local insert = table.insert local assert = assert ----@class http_mock local http_mock = {} -- POST as it's not idempotent @@ -106,7 +105,7 @@ function http_mock:get_all_logs(timeout) end function http_mock:clean(timeout) - -- if we wait, the http_client may timeout and cause error + -- if we wait, the http_client may timeout and cause error -- self:wait_until_no_request(timeout) -- clean unwanted logs diff --git a/spec/helpers/http_mock/template.lua b/spec/helpers/http_mock/template.lua index fc8c097597e..843f12c9c61 100644 --- a/spec/helpers/http_mock/template.lua +++ b/spec/helpers/http_mock/template.lua @@ -244,4 +244,4 @@ $(init) # end -- for location, route in pairs(routes) } } -]] \ No newline at end of file +]] From 3eafdc266a211e36fcc675825f83f91e47ba872d Mon Sep 17 00:00:00 2001 From: Enrique Garcia Cota Date: Fri, 26 Jan 2024 12:10:32 +0100 Subject: [PATCH 16/91] chore(release): bump version to 3.7 as part of the Feature Freeze --- kong-3.6.0-0.rockspec => kong-3.7.0-0.rockspec | 4 ++-- kong/meta.lua | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) rename kong-3.6.0-0.rockspec => kong-3.7.0-0.rockspec (99%) diff --git a/kong-3.6.0-0.rockspec b/kong-3.7.0-0.rockspec similarity index 99% rename from kong-3.6.0-0.rockspec rename to kong-3.7.0-0.rockspec index eeb32cca231..cca7ee53d66 100644 --- a/kong-3.6.0-0.rockspec +++ b/kong-3.7.0-0.rockspec @@ -1,10 +1,10 @@ package = "kong" -version = "3.6.0-0" +version = "3.7.0-0" rockspec_format = "3.0" supported_platforms = {"linux", "macosx"} source = { url = "git+https://github.com/Kong/kong.git", - tag = "3.6.0" + tag = "3.7.0" } description = { summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.", diff --git a/kong/meta.lua b/kong/meta.lua index c149073e1dc..289dd9dbf27 100644 --- a/kong/meta.lua +++ b/kong/meta.lua @@ -1,6 +1,6 @@ local version = setmetatable({ major = 3, - minor = 6, + minor = 7, patch = 0, --suffix = "-alpha.13" }, { From 2516c5035f8a2406a3add38370b520f54aac6a11 Mon Sep 17 00:00:00 2001 From: Niklaus Schen <8458369+Water-Melon@users.noreply.github.com> Date: Fri, 2 Feb 2024 16:43:03 +0800 Subject: [PATCH 17/91] chore(conf): disable TLSv1.1 and lower in openssl 3.x (#12420) - remove unsupported TLS versions from default configurations. - support communication with old versions of OpenSSL clients using TLSv1.1. 
KAG-3259 --- .../kong/disable-TLSv1_1-in-openssl3.yml | 3 +++ kong.conf.default | 7 +++++-- kong/conf_loader/parse.lua | 17 +++++++++++++++++ kong/templates/kong_defaults.lua | 10 ++++++++-- kong/templates/nginx_kong.lua | 7 ++++++- kong/templates/nginx_kong_stream.lua | 6 ++++++ spec/01-unit/03-conf_loader_spec.lua | 10 +++++----- spec/01-unit/04-prefix_handler_spec.lua | 4 ++-- spec/01-unit/28-inject_confs_spec.lua | 4 ++-- spec/fixtures/1.2_custom_nginx.template | 8 ++++---- spec/fixtures/aws-lambda.lua | 2 +- spec/fixtures/mock_webserver_tpl.lua | 2 +- .../nginx_kong_test_custom_inject_http.lua | 2 +- .../nginx_kong_test_custom_inject_stream.lua | 4 ++-- ...est_tcp_echo_server_custom_inject_stream.lua | 2 +- spec/helpers.lua | 2 +- spec/helpers/http_mock/template.lua | 2 +- 17 files changed, 66 insertions(+), 26 deletions(-) create mode 100644 changelog/unreleased/kong/disable-TLSv1_1-in-openssl3.yml diff --git a/changelog/unreleased/kong/disable-TLSv1_1-in-openssl3.yml b/changelog/unreleased/kong/disable-TLSv1_1-in-openssl3.yml new file mode 100644 index 00000000000..aa9305e7731 --- /dev/null +++ b/changelog/unreleased/kong/disable-TLSv1_1-in-openssl3.yml @@ -0,0 +1,3 @@ +message: now TLSv1.1 and lower is by default disabled in OpenSSL 3.x +type: feature +scope: Configuration diff --git a/kong.conf.default b/kong.conf.default index b5021cea8c3..77b9a28788f 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -748,6 +748,7 @@ #ssl_cipher_suite = intermediate # Defines the TLS ciphers served by Nginx. # Accepted values are `modern`, # `intermediate`, `old`, `fips` or `custom`. + # If you want to enable TLSv1.1, this value has to be `old`. # # See https://wiki.mozilla.org/Security/Server_Side_TLS # for detailed descriptions of each cipher @@ -762,13 +763,15 @@ # If you use DHE ciphers, you must also # configure the `ssl_dhparam` parameter. -#ssl_protocols = TLSv1.1 TLSv1.2 TLSv1.3 +#ssl_protocols = TLSv1.2 TLSv1.3 # Enables the specified protocols for # client-side connections. The set of # supported protocol versions also depends # on the version of OpenSSL Kong was built # with. This value is ignored if # `ssl_cipher_suite` is not `custom`. + # If you want to enable TLSv1.1, you should + # set `ssl_cipher_suite` to `old`. # # See http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols @@ -1763,7 +1766,7 @@ # # See https://github.com/openresty/lua-nginx-module#lua_ssl_verify_depth -#lua_ssl_protocols = TLSv1.1 TLSv1.2 TLSv1.3 # Defines the TLS versions supported +#lua_ssl_protocols = TLSv1.2 TLSv1.3 # Defines the TLS versions supported # when handshaking with OpenResty's # TCP cosocket APIs. 
# diff --git a/kong/conf_loader/parse.lua b/kong/conf_loader/parse.lua index 841bff4e1b4..bcdb9f0ff46 100644 --- a/kong/conf_loader/parse.lua +++ b/kong/conf_loader/parse.lua @@ -432,6 +432,23 @@ local function check_and_parse(conf, opts) conf.ssl_dhparam = suite.dhparams conf.nginx_http_ssl_dhparam = suite.dhparams conf.nginx_stream_ssl_dhparam = suite.dhparams + + else + for _, key in ipairs({ + "nginx_http_ssl_conf_command", + "nginx_http_proxy_ssl_conf_command", + "nginx_http_lua_ssl_conf_command", + "nginx_stream_ssl_conf_command", + "nginx_stream_proxy_ssl_conf_command", + "nginx_stream_lua_ssl_conf_command"}) do + + if conf[key] then + local _, _, seclevel = string.find(conf[key], "@SECLEVEL=(%d+)") + if seclevel ~= "0" then + ngx.log(ngx.WARN, key, ": Default @SECLEVEL=0 overridden, TLSv1.1 unavailable") + end + end + end end else diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 2c0802bc72a..5c3931f9592 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -53,7 +53,7 @@ client_ssl_cert = NONE client_ssl_cert_key = NONE ssl_cipher_suite = intermediate ssl_ciphers = NONE -ssl_protocols = TLSv1.1 TLSv1.2 TLSv1.3 +ssl_protocols = TLSv1.2 TLSv1.3 ssl_prefer_server_ciphers = on ssl_dhparam = NONE ssl_session_tickets = on @@ -91,9 +91,15 @@ nginx_http_ssl_prefer_server_ciphers = NONE nginx_http_ssl_dhparam = NONE nginx_http_ssl_session_tickets = NONE nginx_http_ssl_session_timeout = NONE +nginx_http_ssl_conf_command = NONE +nginx_http_proxy_ssl_conf_command = NONE +nginx_http_lua_ssl_conf_command = NONE nginx_http_lua_regex_match_limit = 100000 nginx_http_lua_regex_cache_max_entries = 8192 nginx_http_keepalive_requests = 10000 +nginx_stream_ssl_conf_command = NONE +nginx_stream_proxy_ssl_conf_command = NONE +nginx_stream_lua_ssl_conf_command = NONE nginx_stream_ssl_protocols = NONE nginx_stream_ssl_prefer_server_ciphers = NONE nginx_stream_ssl_dhparam = NONE @@ -170,7 +176,7 @@ router_flavor = traditional_compatible lua_socket_pool_size = 256 lua_ssl_trusted_certificate = system lua_ssl_verify_depth = 1 -lua_ssl_protocols = TLSv1.1 TLSv1.2 TLSv1.3 +lua_ssl_protocols = TLSv1.2 TLSv1.3 lua_package_path = ./?.lua;./?/init.lua; lua_package_cpath = NONE diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua index 405b8686ac1..8cd97849c0e 100644 --- a/kong/templates/nginx_kong.lua +++ b/kong/templates/nginx_kong.lua @@ -24,6 +24,11 @@ lua_shared_dict kong_db_cache_miss 12m; lua_shared_dict kong_secrets 5m; underscores_in_headers on; +> if ssl_cipher_suite == 'old' then +lua_ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; +proxy_ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; +ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; +> end > if ssl_ciphers then ssl_ciphers ${{SSL_CIPHERS}}; > end @@ -503,7 +508,7 @@ server { ssl_certificate $(admin_gui_ssl_cert[i]); ssl_certificate_key $(admin_gui_ssl_cert_key[i]); > end - ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + ssl_protocols TLSv1.2 TLSv1.3; > end client_max_body_size 10m; diff --git a/kong/templates/nginx_kong_stream.lua b/kong/templates/nginx_kong_stream.lua index 4a2d9b07fbc..68a165110a8 100644 --- a/kong/templates/nginx_kong_stream.lua +++ b/kong/templates/nginx_kong_stream.lua @@ -33,6 +33,12 @@ ssl_ciphers ${{SSL_CIPHERS}}; $(el.name) $(el.value); > end +> if ssl_cipher_suite == 'old' then +lua_ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; +proxy_ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; +ssl_conf_command CipherString 
DEFAULT:@SECLEVEL=0; +> end + init_by_lua_block { > if test and coverage then require 'luacov' diff --git a/spec/01-unit/03-conf_loader_spec.lua b/spec/01-unit/03-conf_loader_spec.lua index e00b4cf515d..752471584a7 100644 --- a/spec/01-unit/03-conf_loader_spec.lua +++ b/spec/01-unit/03-conf_loader_spec.lua @@ -1584,19 +1584,19 @@ describe("Configuration loader", function() assert.is_nil(err) assert.is_table(conf) - assert.equal("TLSv1.1 TLSv1.2 TLSv1.3", conf.nginx_http_lua_ssl_protocols) - assert.equal("TLSv1.1 TLSv1.2 TLSv1.3", conf.nginx_stream_lua_ssl_protocols) + assert.equal("TLSv1.2 TLSv1.3", conf.nginx_http_lua_ssl_protocols) + assert.equal("TLSv1.2 TLSv1.3", conf.nginx_stream_lua_ssl_protocols) end) it("sets lua_ssl_protocols to user specified value", function() local conf, err = conf_loader(nil, { - lua_ssl_protocols = "TLSv1.1" + lua_ssl_protocols = "TLSv1.2" }) assert.is_nil(err) assert.is_table(conf) - assert.equal("TLSv1.1", conf.nginx_http_lua_ssl_protocols) - assert.equal("TLSv1.1", conf.nginx_stream_lua_ssl_protocols) + assert.equal("TLSv1.2", conf.nginx_http_lua_ssl_protocols) + assert.equal("TLSv1.2", conf.nginx_stream_lua_ssl_protocols) end) it("sets nginx_http_lua_ssl_protocols and nginx_stream_lua_ssl_protocols to different values", function() diff --git a/spec/01-unit/04-prefix_handler_spec.lua b/spec/01-unit/04-prefix_handler_spec.lua index 4e034e6b2f3..70956e99828 100644 --- a/spec/01-unit/04-prefix_handler_spec.lua +++ b/spec/01-unit/04-prefix_handler_spec.lua @@ -1492,7 +1492,7 @@ describe("NGINX conf compiler", function() local http_inject_conf = prefix_handler.compile_nginx_http_inject_conf(helpers.test_conf) assert.matches("lua_ssl_verify_depth%s+1;", http_inject_conf) assert.matches("lua_ssl_trusted_certificate.+;", http_inject_conf) - assert.matches("lua_ssl_protocols%s+TLSv1.1 TLSv1.2 TLSv1.3;", http_inject_conf) + assert.matches("lua_ssl_protocols%s+TLSv1.2 TLSv1.3;", http_inject_conf) end) it("sets lua_ssl_verify_depth", function() local conf = assert(conf_loader(helpers.test_conf_path, { @@ -1532,7 +1532,7 @@ describe("NGINX conf compiler", function() local stream_inject_conf = prefix_handler.compile_nginx_stream_inject_conf(helpers.test_conf) assert.matches("lua_ssl_verify_depth%s+1;", stream_inject_conf) assert.matches("lua_ssl_trusted_certificate.+;", stream_inject_conf) - assert.matches("lua_ssl_protocols%s+TLSv1.1 TLSv1.2 TLSv1.3;", stream_inject_conf) + assert.matches("lua_ssl_protocols%s+TLSv1.2 TLSv1.3;", stream_inject_conf) end) it("sets lua_ssl_verify_depth", function() local conf = assert(conf_loader(helpers.test_conf_path, { diff --git a/spec/01-unit/28-inject_confs_spec.lua b/spec/01-unit/28-inject_confs_spec.lua index ff5ea8afb9f..916a8fe1156 100644 --- a/spec/01-unit/28-inject_confs_spec.lua +++ b/spec/01-unit/28-inject_confs_spec.lua @@ -18,12 +18,12 @@ lmdb_map_size 2048m; local http_conf = fmt([[ lua_ssl_verify_depth 1; lua_ssl_trusted_certificate '%s/servroot/.ca_combined'; -lua_ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; +lua_ssl_protocols TLSv1.2 TLSv1.3; ]], cwd) local stream_conf = fmt([[ lua_ssl_verify_depth 1; lua_ssl_trusted_certificate '%s/servroot/.ca_combined'; -lua_ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; +lua_ssl_protocols TLSv1.2 TLSv1.3; ]], cwd) local args = { diff --git a/spec/fixtures/1.2_custom_nginx.template b/spec/fixtures/1.2_custom_nginx.template index a0079cafe8b..2f3851d919a 100644 --- a/spec/fixtures/1.2_custom_nginx.template +++ b/spec/fixtures/1.2_custom_nginx.template @@ -98,7 +98,7 @@ http { ssl_certificate 
$(ssl_cert[i]); ssl_certificate_key $(ssl_cert_key[i]); > end - ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + ssl_protocols TLSv1.2 TLSv1.3; ssl_certificate_by_lua_block { Kong.ssl_certificate() } @@ -200,7 +200,7 @@ http { ssl_certificate $(admin_ssl_cert[i]); ssl_certificate_key $(admin_ssl_cert_key[i]); > end - ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + ssl_protocols TLSv1.2 TLSv1.3; > end # injected nginx_admin_* directives @@ -237,7 +237,7 @@ http { ssl_certificate $(ssl_cert[i]); ssl_certificate_key $(ssl_cert_key[i]); > end - ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + ssl_protocols TLSv1.2 TLSv1.3; set_real_ip_from 127.0.0.1; @@ -557,7 +557,7 @@ stream { ssl_certificate $(ssl_cert[i]); ssl_certificate_key $(ssl_cert_key[i]); > end - ssl_protocols TLSv1.1 TLSv1.2; + ssl_protocols TLSv1.2; content_by_lua_block { local sock = assert(ngx.req.socket(true)) diff --git a/spec/fixtures/aws-lambda.lua b/spec/fixtures/aws-lambda.lua index 1d99bad795c..ea36367115e 100644 --- a/spec/fixtures/aws-lambda.lua +++ b/spec/fixtures/aws-lambda.lua @@ -17,7 +17,7 @@ local fixtures = { ssl_certificate ${{SSL_CERT}}; ssl_certificate_key ${{SSL_CERT_KEY}}; > end - ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + ssl_protocols TLSv1.2 TLSv1.3; location ~ "/2015-03-31/functions/(?:[^/])*/invocations" { content_by_lua_block { diff --git a/spec/fixtures/mock_webserver_tpl.lua b/spec/fixtures/mock_webserver_tpl.lua index 598f9ef2ebb..87ebbf16da5 100644 --- a/spec/fixtures/mock_webserver_tpl.lua +++ b/spec/fixtures/mock_webserver_tpl.lua @@ -85,7 +85,7 @@ http { ssl_certificate ${cert_path}/kong_spec.crt; ssl_certificate_key ${cert_path}/kong_spec.key; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_protocols TLSv1.2; ssl_ciphers HIGH:!aNULL:!MD5; #end # if check_hostname then diff --git a/spec/fixtures/template_inject/nginx_kong_test_custom_inject_http.lua b/spec/fixtures/template_inject/nginx_kong_test_custom_inject_http.lua index d66b38e6120..46439562963 100644 --- a/spec/fixtures/template_inject/nginx_kong_test_custom_inject_http.lua +++ b/spec/fixtures/template_inject/nginx_kong_test_custom_inject_http.lua @@ -12,7 +12,7 @@ lua_shared_dict kong_mock_upstream_loggers 10m; ssl_certificate $(ssl_cert[i]); ssl_certificate_key $(ssl_cert_key[i]); > end - ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + ssl_protocols TLSv1.2 TLSv1.3; set_real_ip_from 127.0.0.1; diff --git a/spec/fixtures/template_inject/nginx_kong_test_custom_inject_stream.lua b/spec/fixtures/template_inject/nginx_kong_test_custom_inject_stream.lua index 20acfa289f6..7d43af7446c 100644 --- a/spec/fixtures/template_inject/nginx_kong_test_custom_inject_stream.lua +++ b/spec/fixtures/template_inject/nginx_kong_test_custom_inject_stream.lua @@ -8,7 +8,7 @@ server { ssl_certificate $(ssl_cert[i]); ssl_certificate_key $(ssl_cert_key[i]); > end - ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + ssl_protocols TLSv1.2 TLSv1.3; content_by_lua_block { local sock = assert(ngx.req.socket()) @@ -51,4 +51,4 @@ server { proxy_socket_keepalive on; } > end -- cluster_ssl_tunnel -]] \ No newline at end of file +]] diff --git a/spec/fixtures/template_inject/nginx_kong_test_tcp_echo_server_custom_inject_stream.lua b/spec/fixtures/template_inject/nginx_kong_test_tcp_echo_server_custom_inject_stream.lua index db3aac86124..302f1455368 100644 --- a/spec/fixtures/template_inject/nginx_kong_test_tcp_echo_server_custom_inject_stream.lua +++ b/spec/fixtures/template_inject/nginx_kong_test_tcp_echo_server_custom_inject_stream.lua @@ -7,7 +7,7 @@ server { ssl_certificate $(ssl_cert[i]); ssl_certificate_key 
$(ssl_cert_key[i]); > end - ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + ssl_protocols TLSv1.2 TLSv1.3; content_by_lua_block { local sock = assert(ngx.req.socket()) diff --git a/spec/helpers.lua b/spec/helpers.lua index 5556774173d..a86ca9a1061 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -3603,7 +3603,7 @@ end -- -- ssl_certificate ${{SSL_CERT}}; -- ssl_certificate_key ${{SSL_CERT_KEY}}; --- ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; +-- ssl_protocols TLSv1.2 TLSv1.3; -- -- location ~ "/echobody" { -- content_by_lua_block { diff --git a/spec/helpers/http_mock/template.lua b/spec/helpers/http_mock/template.lua index 843f12c9c61..f1f11793368 100644 --- a/spec/helpers/http_mock/template.lua +++ b/spec/helpers/http_mock/template.lua @@ -128,7 +128,7 @@ $(init) # if tls then ssl_certificate ../../spec/fixtures/kong_spec.crt; ssl_certificate_key ../../spec/fixtures/kong_spec.key; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_protocols TLSv1.2; ssl_ciphers HIGH:!aNULL:!MD5; # end From 29659469eb2ae0418e165ffada230e4493ec8550 Mon Sep 17 00:00:00 2001 From: Jack Tysoe <91137069+tysoekong@users.noreply.github.com> Date: Fri, 2 Feb 2024 22:46:11 +0000 Subject: [PATCH 18/91] fix(ai-proxy): gzip decompression library has moved (#12503) --- kong/llm/drivers/shared.lua | 5 +- kong/plugins/ai-proxy/handler.lua | 3 +- .../ai-response-transformer/handler.lua | 2 +- .../08-encoding_integration_spec.lua | 366 ++++++++++++++++++ 4 files changed, 373 insertions(+), 3 deletions(-) create mode 100644 spec/03-plugins/38-ai-proxy/08-encoding_integration_spec.lua diff --git a/kong/llm/drivers/shared.lua b/kong/llm/drivers/shared.lua index dcc996c8085..f2e60327064 100644 --- a/kong/llm/drivers/shared.lua +++ b/kong/llm/drivers/shared.lua @@ -4,6 +4,7 @@ local _M = {} local cjson = require("cjson.safe") local http = require("resty.http") local fmt = string.format +local os = os -- local log_entry_keys = { @@ -18,8 +19,10 @@ local log_entry_keys = { PROVIDER_NAME = "ai.meta.provider_name", } +local openai_override = os.getenv("OPENAI_TEST_PORT") + _M.upstream_url_format = { - openai = "https://api.openai.com:443", + openai = fmt("%s://api.openai.com:%s", (openai_override and "http") or "https", (openai_override) or "443"), anthropic = "https://api.anthropic.com:443", cohere = "https://api.cohere.com:443", azure = "https://%s.openai.azure.com:443/openai/deployments/%s", diff --git a/kong/plugins/ai-proxy/handler.lua b/kong/plugins/ai-proxy/handler.lua index 89242ffc448..631a7b5b48b 100644 --- a/kong/plugins/ai-proxy/handler.lua +++ b/kong/plugins/ai-proxy/handler.lua @@ -4,7 +4,7 @@ local _M = {} local ai_shared = require("kong.llm.drivers.shared") local llm = require("kong.llm") local cjson = require("cjson.safe") -local kong_utils = require("kong.tools.utils") +local kong_utils = require("kong.tools.gzip") local kong_meta = require "kong.meta" -- @@ -37,6 +37,7 @@ function _M:header_filter(conf) if response_body then local is_gzip = kong.response.get_header("Content-Encoding") == "gzip" + if is_gzip then response_body = kong_utils.inflate_gzip(response_body) end diff --git a/kong/plugins/ai-response-transformer/handler.lua b/kong/plugins/ai-response-transformer/handler.lua index b5cde6fc0da..d4535b37e6d 100644 --- a/kong/plugins/ai-response-transformer/handler.lua +++ b/kong/plugins/ai-response-transformer/handler.lua @@ -4,7 +4,7 @@ local _M = {} local kong_meta = require "kong.meta" local http = require("resty.http") local fmt = string.format -local kong_utils = require("kong.tools.utils") +local kong_utils = 
require("kong.tools.gzip") local llm = require("kong.llm") -- diff --git a/spec/03-plugins/38-ai-proxy/08-encoding_integration_spec.lua b/spec/03-plugins/38-ai-proxy/08-encoding_integration_spec.lua new file mode 100644 index 00000000000..371f99b11f2 --- /dev/null +++ b/spec/03-plugins/38-ai-proxy/08-encoding_integration_spec.lua @@ -0,0 +1,366 @@ +local helpers = require "spec.helpers" +local cjson = require "cjson" +local inflate_gzip = require("kong.tools.gzip").inflate_gzip + +local PLUGIN_NAME = "ai-proxy" +local MOCK_PORT = 62349 + +local openai_driver = require("kong.llm.drivers.openai") + +local format_stencils = { + llm_v1_chat = { + good = { + + user_request = { + messages = { + [1] = { + role = "system", + content = "You are a scientist.", + }, + [2] = { + role = "user", + content = "Why can't you divide by zero?", + }, + }, + }, + + provider_response = { + choices = { + [1] = { + finish_reason = "stop", + index = 0, + messages = { + role = "assistant", + content = "Dividing by zero is undefined in mathematics because it leads to results that are contradictory or nonsensical.", + }, + }, + }, + created = 1702325640, + id = "chatcmpl-8Ugx63a79wKACVkaBbKnR2C2HPcxT", + model = "gpt-4-0613", + object = "chat.completion", + system_fingerprint = nil, + usage = { + completion_tokens = 139, + prompt_tokens = 130, + total_tokens = 269, + }, + }, + + }, + + + faulty = { + + provider_response = { + your_request = { + was_not = "correct but for some reason i return 200 anyway", + }, + }, + + }, + + unauthorized = { + + provider_response = { + error = { + message = "bad API key", + } + }, + + }, + + error = { + + provider_response = { + error = { + message = "some failure", + }, + }, + }, + + error_faulty = { + + provider_response = { + bad_message = { + bad_error = { + unauthorized = "some failure with weird json", + }, + } + }, + + }, + + }, +} + +local plugin_conf = { + route_type = "llm/v1/chat", + auth = { + header_name = "Authorization", + header_value = "Bearer openai-key", + }, + model = { + name = "gpt-4", + provider = "openai", + options = { + max_tokens = 256, + temperature = 1.0, + }, + }, +} + +for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then + describe(PLUGIN_NAME .. ": (access) [#" .. strategy .. 
"]", function() + local client + + lazy_setup(function() + local bp = helpers.get_db_utils(strategy == "off" and "postgres" or strategy, nil, { PLUGIN_NAME }) + + -- set up openai mock fixtures + local fixtures = { + http_mock = {}, + dns_mock = helpers.dns_mock.new({ + mocks_only = true, -- don't fallback to "real" DNS + }), + } + + fixtures.dns_mock:A { + name = "api.openai.com", + address = "127.0.0.1", + } + + -- openai llm driver will always send to this port, if var is set + helpers.setenv("OPENAI_TEST_PORT", tostring(MOCK_PORT)) + + fixtures.http_mock.openai = [[ + server { + server_name openai; + listen ]]..MOCK_PORT..[[; + + default_type 'application/json'; + + location = "/v1/chat/completions" { + content_by_lua_block { + local json = require("cjson.safe") + local inflate_gzip = require("kong.tools.gzip").inflate_gzip + local deflate_gzip = require("kong.tools.gzip").deflate_gzip + + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local token = ngx.req.get_headers()["authorization"] + local token_query = ngx.req.get_uri_args()["apikey"] + + if token == "Bearer openai-key" or token_query == "openai-key" or body.apikey == "openai-key" then + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + if err or (body.messages == ngx.null) then + ngx.status = 400 + -- ngx.print(pl_file.read("spec/fixtures/ai-proxy/openai/llm-v1-chat/responses/bad_request.json")) + else + local test_type = ngx.req.get_headers()['x-test-type'] + + -- switch based on test type requested + if test_type == ngx.null or test_type == "200" then + ngx.status = 200 + ngx.header["content-encoding"] = "gzip" + local response = deflate_gzip(']] .. cjson.encode(format_stencils.llm_v1_chat.good.provider_response) .. [[') + ngx.print(response) + elseif test_type == "200_FAULTY" then + ngx.status = 200 + ngx.header["content-encoding"] = "gzip" + local response = deflate_gzip(']] .. cjson.encode(format_stencils.llm_v1_chat.faulty.provider_response) .. [[') + ngx.print(response) + elseif test_type == "401" then + ngx.status = 401 + ngx.header["content-encoding"] = "gzip" + local response = deflate_gzip(']] .. cjson.encode(format_stencils.llm_v1_chat.unauthorized.provider_response) .. [[') + ngx.print(response) + elseif test_type == "500" then + ngx.status = 500 + ngx.header["content-encoding"] = "gzip" + local response = deflate_gzip(']] .. cjson.encode(format_stencils.llm_v1_chat.error.provider_response) .. [[') + ngx.print(response) + elseif test_type == "500_FAULTY" then + ngx.status = 500 + ngx.header["content-encoding"] = "gzip" + local response = deflate_gzip(']] .. cjson.encode(format_stencils.llm_v1_chat.error_faulty.provider_response) .. 
[[') + ngx.print(response) + end + end + else + ngx.status = 401 + -- ngx.print(pl_file.read("spec/fixtures/ai-proxy/openai/llm-v1-chat/responses/unauthorized.json")) + end + } + } + + } + ]] + + local empty_service = assert(bp.services:insert { + name = "empty_service", + host = "localhost", + port = 8080, + path = "/", + }) + + -- 200 chat good, gzipped from server + local openai_chat = assert(bp.routes:insert { + service = empty_service, + protocols = { "http" }, + strip_path = true, + paths = { "/openai/llm/v1/chat" } + }) + bp.plugins:insert { + name = PLUGIN_NAME, + route = { id = openai_chat.id }, + config = plugin_conf, + } + -- + + -- start kong + assert(helpers.start_kong({ + -- set the strategy + database = strategy, + -- use the custom test template to create a local mock server + nginx_conf = "spec/fixtures/custom_nginx.template", + -- make sure our plugin gets loaded + plugins = "bundled," .. PLUGIN_NAME, + -- write & load declarative config, only if 'strategy=off' + declarative_config = strategy == "off" and helpers.make_yaml_file() or nil, + }, nil, nil, fixtures)) + end) + + lazy_teardown(function() + helpers.stop_kong(nil, true) + end) + + before_each(function() + client = helpers.proxy_client() + end) + + after_each(function() + if client then client:close() end + end) + + + ---- TESTS + describe("returns deflated response to client", function() + it("200 from LLM", function() + local r = client:get("/openai/llm/v1/chat", { + headers = { + ["content-type"] = "application/json", + ["accept"] = "application/json", + ["x-test-type"] = "200", + }, + body = format_stencils.llm_v1_chat.good.user_request, + }) + + -- validate that the request succeeded, response status 200 + local actual_response_string = assert.res_status(200 , r) + actual_response_string = inflate_gzip(actual_response_string) + local actual_response, err = cjson.decode(actual_response_string) + assert.is_falsy(err) + + -- execute the response format transformer manually + local expected_response_string, err = cjson.encode(format_stencils.llm_v1_chat.good.provider_response) + assert.is_falsy(err) + + local expected_response, err = openai_driver.from_format(expected_response_string, plugin_conf.model, plugin_conf.route_type) + assert.is_falsy(err) + expected_response, err = cjson.decode(expected_response) + assert.is_falsy(err) + + -- compare the webserver vs code responses objects + assert.same(expected_response, actual_response) + end) + end) + + it("200 from LLM but with faulty response format", function() + local r = client:get("/openai/llm/v1/chat", { + headers = { + ["content-type"] = "application/json", + ["accept"] = "application/json", + ["x-test-type"] = "200_FAULTY", + }, + body = format_stencils.llm_v1_chat.good.user_request, + }) + + -- validate that the request succeeded, response status 200 + local actual_response_string = assert.res_status(500 , r) + actual_response_string = inflate_gzip(actual_response_string) + local actual_response, err = cjson.decode(actual_response_string) + assert.is_falsy(err) + + -- compare the webserver vs expected error + assert.same({ error = { message = "transformation failed from type openai://llm/v1/chat: 'choices' not in llm/v1/chat response" }}, actual_response) + end) + + it("401 from LLM", function() + local r = client:get("/openai/llm/v1/chat", { + headers = { + ["content-type"] = "application/json", + ["accept"] = "application/json", + ["x-test-type"] = "401", + }, + body = format_stencils.llm_v1_chat.good.user_request, + }) + + -- validate that the request 
succeeded, response status 200 + local actual_response_string = assert.res_status(401 , r) + actual_response_string = inflate_gzip(actual_response_string) + local actual_response, err = cjson.decode(actual_response_string) + assert.is_falsy(err) + + -- compare the webserver vs expected error + assert.same({ error = { message = "bad API key" }}, actual_response) + end) + + it("500 from LLM", function() + local r = client:get("/openai/llm/v1/chat", { + headers = { + ["content-type"] = "application/json", + ["accept"] = "application/json", + ["x-test-type"] = "500", + }, + body = format_stencils.llm_v1_chat.good.user_request, + }) + + -- validate that the request succeeded, response status 200 + local actual_response_string = assert.res_status(500 , r) + actual_response_string = inflate_gzip(actual_response_string) + local actual_response, err = cjson.decode(actual_response_string) + assert.is_falsy(err) + + -- compare the webserver vs expected error + assert.same({ error = { message = "some failure" }}, actual_response) + end) + + it("500 from LLM but with faulty response format", function() + local r = client:get("/openai/llm/v1/chat", { + headers = { + ["content-type"] = "application/json", + ["accept"] = "application/json", + ["x-test-type"] = "500_FAULTY", + }, + body = format_stencils.llm_v1_chat.good.user_request, + }) + + -- validate that the request succeeded, response status 200 + local actual_response_string = assert.res_status(500 , r) + actual_response_string = inflate_gzip(actual_response_string) + local actual_response, err = cjson.decode(actual_response_string) + assert.is_falsy(err) + + -- compare the webserver vs expected error + assert.same({ bad_message = { bad_error = { unauthorized = "some failure with weird json" }}}, actual_response) + end) + end) + ---- + +end end From d142390fddca08e5ec0f0713cc123f1001d27e98 Mon Sep 17 00:00:00 2001 From: Zachary Hu <6426329+outsinre@users.noreply.github.com> Date: Mon, 5 Feb 2024 11:15:29 +0800 Subject: [PATCH 19/91] fix(core): use `-1` as the worker ID of privileged agent to avoid access issues (#12385) By default, `ngx.worker.id()` returns `nil` for the privileged agent. Now Fall back to `-1` as the worker ID of privileged agent worker to avoid error. --------- Co-authored-by: Datong Sun --- changelog/unreleased/kong/fix_privileged_agent_id_1.yml | 4 ++++ kong/api/routes/kong.lua | 2 +- kong/clustering/control_plane.lua | 2 +- kong/plugins/acme/handler.lua | 2 +- kong/plugins/statsd/log.lua | 2 +- kong/runloop/handler.lua | 2 +- kong/runloop/log_level.lua | 4 ++-- kong/runloop/plugin_servers/pb_rpc.lua | 2 +- spec/02-integration/20-wasm/05-cache-invalidation_spec.lua | 2 +- 9 files changed, 13 insertions(+), 9 deletions(-) create mode 100644 changelog/unreleased/kong/fix_privileged_agent_id_1.yml diff --git a/changelog/unreleased/kong/fix_privileged_agent_id_1.yml b/changelog/unreleased/kong/fix_privileged_agent_id_1.yml new file mode 100644 index 00000000000..0cabc3796bf --- /dev/null +++ b/changelog/unreleased/kong/fix_privileged_agent_id_1.yml @@ -0,0 +1,4 @@ +message: | + Use `-1` as the worker ID of privileged agent to avoid access issues. 
+type: bugfix +scope: Core diff --git a/kong/api/routes/kong.lua b/kong/api/routes/kong.lua index 16a2d4c7dcd..a80615302c3 100644 --- a/kong/api/routes/kong.lua +++ b/kong/api/routes/kong.lua @@ -254,7 +254,7 @@ return { GET = function (self, db, helpers) local body = { worker = { - id = ngx.worker.id(), + id = ngx.worker.id() or -1, count = ngx.worker.count(), }, stats = kong.timer:stats({ diff --git a/kong/clustering/control_plane.lua b/kong/clustering/control_plane.lua index 317466e2a82..aec39586c99 100644 --- a/kong/clustering/control_plane.lua +++ b/kong/clustering/control_plane.lua @@ -119,7 +119,7 @@ function _M:export_deflated_reconfigure_payload() end -- store serialized plugins map for troubleshooting purposes - local shm_key_name = "clustering:cp_plugins_configured:worker_" .. worker_id() + local shm_key_name = "clustering:cp_plugins_configured:worker_" .. (worker_id() or -1) kong_dict:set(shm_key_name, cjson_encode(self.plugins_configured)) ngx_log(ngx_DEBUG, "plugin configuration map key: ", shm_key_name, " configuration: ", kong_dict:get(shm_key_name)) diff --git a/kong/plugins/acme/handler.lua b/kong/plugins/acme/handler.lua index 58cf7fa6000..f33efd637be 100644 --- a/kong/plugins/acme/handler.lua +++ b/kong/plugins/acme/handler.lua @@ -83,7 +83,7 @@ end function ACMEHandler:init_worker() - local worker_id = ngx.worker.id() + local worker_id = ngx.worker.id() or -1 kong.log.info("acme renew timer started on worker ", worker_id) ngx.timer.every(86400, renew) end diff --git a/kong/plugins/statsd/log.lua b/kong/plugins/statsd/log.lua index d0bede908d6..193867193ac 100644 --- a/kong/plugins/statsd/log.lua +++ b/kong/plugins/statsd/log.lua @@ -441,7 +441,7 @@ function _M.execute(conf) kong.log.debug("Status code is within given status code ranges") if not worker_id then - worker_id = ngx.worker.id() + worker_id = ngx.worker.id() or -1 end conf._prefix = conf.prefix diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index 01efbdfbf3a..e6cf91469f9 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -634,7 +634,7 @@ do local CURRENT_BALANCER_HASH = 0 reconfigure_handler = function(data) - local worker_id = ngx_worker_id() + local worker_id = ngx_worker_id() or -1 if exiting() then log(NOTICE, "declarative reconfigure was canceled on worker #", worker_id, diff --git a/kong/runloop/log_level.lua b/kong/runloop/log_level.lua index 90c545bcae3..5f253375246 100644 --- a/kong/runloop/log_level.lua +++ b/kong/runloop/log_level.lua @@ -41,7 +41,7 @@ local function init_handler() - ngx.time() if shm_log_level and cur_log_level ~= shm_log_level and timeout > 0 then - set_log_level(ngx.worker.id(), shm_log_level, timeout) + set_log_level(ngx.worker.id() or -1, shm_log_level, timeout) end end @@ -68,7 +68,7 @@ end -- log level worker event updates local function worker_handler(data) - local worker = ngx.worker.id() + local worker = ngx.worker.id() or -1 log(NOTICE, "log level worker event received for worker ", worker) diff --git a/kong/runloop/plugin_servers/pb_rpc.lua b/kong/runloop/plugin_servers/pb_rpc.lua index dc2d15393e2..8aae88de866 100644 --- a/kong/runloop/plugin_servers/pb_rpc.lua +++ b/kong/runloop/plugin_servers/pb_rpc.lua @@ -371,7 +371,7 @@ function Rpc:call_start_instance(plugin_name, conf) return nil, err end - kong.log.debug("started plugin server: seq ", conf.__seq__, ", worker ", ngx.worker.id(), ", instance id ", + kong.log.debug("started plugin server: seq ", conf.__seq__, ", worker ", ngx.worker.id() or -1, ", instance id ", 
status.instance_status.instance_id) return { diff --git a/spec/02-integration/20-wasm/05-cache-invalidation_spec.lua b/spec/02-integration/20-wasm/05-cache-invalidation_spec.lua index adcb622e261..1b044f2759b 100644 --- a/spec/02-integration/20-wasm/05-cache-invalidation_spec.lua +++ b/spec/02-integration/20-wasm/05-cache-invalidation_spec.lua @@ -188,7 +188,7 @@ describe("#wasm filter chain cache " .. mode_suffix, function() rewrite = {[[ kong.response.set_header( "]] .. WORKER_ID_HEADER .. [[", - ngx.worker.id() + ngx.worker.id() or -1 ) ]]} } From cbaa2298ee2326d90c7b75d58566098b05df4fac Mon Sep 17 00:00:00 2001 From: subnetmarco <88.marco@gmail.com> Date: Thu, 1 Feb 2024 07:59:34 -0500 Subject: [PATCH 20/91] docs(readme/features): adding AI gateway highlights --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e982fd6c5f1..0118d61c17f 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,7 @@ The top Kong features include: - Authentication and authorization for APIs using methods like JWT, basic auth, OAuth, ACLs and more. - Proxy, SSL/TLS termination, and connectivity support for L4 or L7 traffic. - Plugins for enforcing traffic controls, rate limiting, req/res transformations, logging, monitoring and including a plugin developer hub. +- Plugins for AI traffic to support multi-LLM implementations and no-code AI use cases, with advanced AI prompt engineering, AI observability, AI security and more. - Sophisticated deployment models like Declarative Databaseless Deployment and Hybrid Deployment (control plane/data plane separation) without any vendor lock-in. - Native [ingress controller](https://github.com/Kong/kubernetes-ingress-controller) support for serving Kubernetes. From 51cf38080b3998781b77a3ab55cf327142373d2a Mon Sep 17 00:00:00 2001 From: Jun Ouyang Date: Mon, 5 Feb 2024 18:23:13 +0800 Subject: [PATCH 21/91] chore(deps): bump h2client version to v0.4.4 (#12535) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 21de2dca16e..af0ff49c799 100644 --- a/Makefile +++ b/Makefile @@ -45,7 +45,7 @@ ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) KONG_SOURCE_LOCATION ?= $(ROOT_DIR) GRPCURL_VERSION ?= 1.8.5 BAZLISK_VERSION ?= 1.19.0 -H2CLIENT_VERSION ?= 0.4.0 +H2CLIENT_VERSION ?= 0.4.4 BAZEL := $(shell command -v bazel 2> /dev/null) VENV = /dev/null # backward compatibility when no venv is built From 1b625db194b7d1e32b33d803eb75c703479bb4ed Mon Sep 17 00:00:00 2001 From: Enrique Garcia Cota Date: Fri, 2 Feb 2024 22:32:31 +0100 Subject: [PATCH 22/91] docs(release): genereate 3.6.0 changelog --- changelog/3.6.0/3.6.0.md | 388 ++++++++++++++++++ .../kong-manager/entity_form_preview.yml | 3 + .../redesigned_basic_components.yml | 3 + .../standardized_notification_format.yml | 3 + .../kong-manager/unified_plugin_pages.yml | 3 + changelog/3.6.0/kong/.gitkeep | 0 .../kong/add-ai-prompt-decorator-plugin.yml | 0 .../kong/add-ai-prompt-guard-plugin.yml | 0 .../kong/add-ai-prompt-template-plugin.yml | 0 .../kong/add-ai-proxy-plugin.yml | 0 .../kong/add-ai-proxy-telemetry.yml | 0 .../add-ai-request-transformer-plugin.yml | 0 .../add-ai-response-transformer-plugin.yml | 0 ...way-edition-to-root-endpoint-admin-api.yml | 0 .../kong/add_ngx_brotli_module.yml | 0 .../kong/atc_reuse_context.yml | 0 .../kong/basic_www_authenticate.yml | 0 .../kong/bump-atc-router.yml | 0 .../bump-cocurrency-limit-of-timer-ng.yml | 0 .../kong/bump-lapis-1.16.0.1.yml | 0 .../kong/bump-lpeg-1.1.0.yml | 0 
.../kong/bump-lua-messagepack-0.5.3.yml | 0 .../kong/bump-lua-messagepack-0.5.4.yml | 0 .../kong/bump-lua-resty-aws-1.3.6.yml | 0 .../kong/bump-lua-resty-healthcheck-3.0.1.yml | 0 .../kong/bump-lua-resty-lmdb-1.4.1.yml | 0 .../kong/bump-lua-resty-timer-ng-to-0.2.6.yml | 0 .../kong/bump-ngx-wasm-module.yml | 0 .../kong/bump-openresty.yml | 0 .../kong/bump-openssl.yml | 0 .../kong/bump-resty-openssl.yml | 0 .../kong/bump-wasmtime.yml | 0 .../kong/bump_dns_stale_ttl.yml | 0 .../kong/bump_ngx_brotli.yml | 0 .../kong/ca_certificates_reference_check.yml | 0 .../clustering-empty-data-plane-hash-fix.yml | 0 .../kong/cookie-name-validator.yml | 0 .../kong/cp-expose-dp-cert-details.yml | 0 .../kong/dao-pk-as-entity.yml | 0 .../kong/debian-12-support.yml | 0 .../kong/declarative_config_fix.yml | 0 .../kong/default_status_port.yml | 0 .../kong/deps_bump_lua_resty_healthcheck.yml | 0 ...splay-warning-message-for-km-misconfig.yml | 0 .../enhance_admin_api_auth_error_response.yml | 0 .../kong/error_handler_494.yml | 0 .../expression_http_headers_sensitive.yml | 0 .../kong/expressions_not_operator.yml | 0 .../feat-add-cipher-to-the-intermediate.yml | 0 ...declarative-config-flattened-data-loss.yml | 0 .../kong/fix-error-message-print.yml | 0 .../kong/fix-ldoc-intermittent-fail.yml | 0 ...fix-pdk-response-set-header-with-table.yml | 0 ...fix-upstream-uri-azure-function-plugin.yml | 0 .../kong/fix-wasm-module-branch.yml | 0 .../kong/fix_dns_blocking.yml | 0 .../kong/fix_dns_disable_dns_no_sync.yml | 0 .../fix_dns_instrument_error_handling.yml | 0 .../kong/inject-nginx-directives-location.yml | 0 .../kong/introduce_lmdb_validation_tag.yml | 0 .../kong/log-serializer-source-property.yml | 0 .../kong/optimize_keepalive_parameters.yml | 0 .../pdk-json-encoding-numbers-precision.yml | 0 ...response-send-remove-transfer-encoding.yml | 0 .../kong/perf-tracing-from-timers.yml | 0 .../kong/plugin-server-instance-leak.yml | 0 .../{unreleased => 3.6.0}/kong/postremove.yml | 0 .../prometheus_expose_no_service_metrics.yml | 0 .../rate-limiting-fix-redis-sync-rate.yml | 0 .../kong/respect-custom-proxy_access_log.yml | 0 .../kong/rl-shared-sync-timer.yml | 0 .../kong/router-report-yield.yml | 0 ...ss-routes-still-trigger-datalog-plugin.yml | 0 .../standardize-redis-conifguration-acme.yml | 0 ...dize-redis-conifguration-rate-limiting.yml | 0 ...ardize-redis-conifguration-response-rl.yml | 0 ...subsystems_do_not_share_router_schemas.yml | 0 .../kong/support_http_path_segments_field.yml | 0 ...upport_net_src_dst_field_in_expression.yml | 0 .../kong/tracing-dns-query-patch.yml | 0 .../kong/tracing-sampling-rate-scope.yml | 0 .../kong/validate_private_key.yml | 0 .../kong/wasm-attach.yml | 0 .../kong/wasm-dynamic-properties.yml | 0 .../kong/wasm-injected-shm-kv.yml | 0 85 files changed, 400 insertions(+) create mode 100644 changelog/3.6.0/3.6.0.md create mode 100644 changelog/3.6.0/kong-manager/entity_form_preview.yml create mode 100644 changelog/3.6.0/kong-manager/redesigned_basic_components.yml create mode 100644 changelog/3.6.0/kong-manager/standardized_notification_format.yml create mode 100644 changelog/3.6.0/kong-manager/unified_plugin_pages.yml create mode 100644 changelog/3.6.0/kong/.gitkeep rename changelog/{unreleased => 3.6.0}/kong/add-ai-prompt-decorator-plugin.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/add-ai-prompt-guard-plugin.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/add-ai-prompt-template-plugin.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/add-ai-proxy-plugin.yml (100%) 
rename changelog/{unreleased => 3.6.0}/kong/add-ai-proxy-telemetry.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/add-ai-request-transformer-plugin.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/add-ai-response-transformer-plugin.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/add-gateway-edition-to-root-endpoint-admin-api.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/add_ngx_brotli_module.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/atc_reuse_context.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/basic_www_authenticate.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-atc-router.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-cocurrency-limit-of-timer-ng.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-lapis-1.16.0.1.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-lpeg-1.1.0.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-lua-messagepack-0.5.3.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-lua-messagepack-0.5.4.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-lua-resty-aws-1.3.6.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-lua-resty-healthcheck-3.0.1.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-lua-resty-lmdb-1.4.1.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-lua-resty-timer-ng-to-0.2.6.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-ngx-wasm-module.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-openresty.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-openssl.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-resty-openssl.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump-wasmtime.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump_dns_stale_ttl.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/bump_ngx_brotli.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/ca_certificates_reference_check.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/clustering-empty-data-plane-hash-fix.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/cookie-name-validator.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/cp-expose-dp-cert-details.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/dao-pk-as-entity.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/debian-12-support.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/declarative_config_fix.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/default_status_port.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/deps_bump_lua_resty_healthcheck.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/display-warning-message-for-km-misconfig.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/enhance_admin_api_auth_error_response.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/error_handler_494.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/expression_http_headers_sensitive.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/expressions_not_operator.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/feat-add-cipher-to-the-intermediate.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/fix-declarative-config-flattened-data-loss.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/fix-error-message-print.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/fix-ldoc-intermittent-fail.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/fix-pdk-response-set-header-with-table.yml (100%) rename changelog/{unreleased => 
3.6.0}/kong/fix-upstream-uri-azure-function-plugin.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/fix-wasm-module-branch.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/fix_dns_blocking.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/fix_dns_disable_dns_no_sync.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/fix_dns_instrument_error_handling.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/inject-nginx-directives-location.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/introduce_lmdb_validation_tag.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/log-serializer-source-property.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/optimize_keepalive_parameters.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/pdk-json-encoding-numbers-precision.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/pdk-response-send-remove-transfer-encoding.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/perf-tracing-from-timers.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/plugin-server-instance-leak.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/postremove.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/prometheus_expose_no_service_metrics.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/rate-limiting-fix-redis-sync-rate.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/respect-custom-proxy_access_log.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/rl-shared-sync-timer.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/router-report-yield.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/serviceless-routes-still-trigger-datalog-plugin.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/standardize-redis-conifguration-acme.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/standardize-redis-conifguration-rate-limiting.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/standardize-redis-conifguration-response-rl.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/subsystems_do_not_share_router_schemas.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/support_http_path_segments_field.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/support_net_src_dst_field_in_expression.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/tracing-dns-query-patch.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/tracing-sampling-rate-scope.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/validate_private_key.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/wasm-attach.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/wasm-dynamic-properties.yml (100%) rename changelog/{unreleased => 3.6.0}/kong/wasm-injected-shm-kv.yml (100%) diff --git a/changelog/3.6.0/3.6.0.md b/changelog/3.6.0/3.6.0.md new file mode 100644 index 00000000000..58f0a362c01 --- /dev/null +++ b/changelog/3.6.0/3.6.0.md @@ -0,0 +1,388 @@ +## Kong + + +### Performance +#### Performance + +- Bumped the concurrency range of the lua-resty-timer-ng library from [32, 256] to [512, 2048]. + [#12275](https://github.com/Kong/kong/issues/12275) + [KAG-2932](https://konghq.atlassian.net/browse/KAG-2932) [KAG-3452](https://konghq.atlassian.net/browse/KAG-3452) + +- Cooperatively yield when building statistics of routes to reduce the impact to proxy path latency. + [#12013](https://github.com/Kong/kong/issues/12013) + +#### Configuration + +- Bump `dns_stale_ttl` default to 1 hour so stale DNS record can be used for longer time in case of resolver downtime. 
+ [#12087](https://github.com/Kong/kong/issues/12087) + [KAG-3080](https://konghq.atlassian.net/browse/KAG-3080) + +- Bumped default values of `nginx_http_keepalive_requests` and `upstream_keepalive_max_requests` to `10000`. + [#12223](https://github.com/Kong/kong/issues/12223) + [KAG-3360](https://konghq.atlassian.net/browse/KAG-3360) +#### Core + +- Reuse match context between requests to avoid frequent memory allocation/deallocation + [#12258](https://github.com/Kong/kong/issues/12258) + [KAG-3448](https://konghq.atlassian.net/browse/KAG-3448) +#### PDK + +- Performance optimization to avoid unnecessary creations and garbage-collections of spans + [#12080](https://github.com/Kong/kong/issues/12080) + [KAG-3169](https://konghq.atlassian.net/browse/KAG-3169) + +### Breaking Changes +#### Core + +- **BREAKING:** To avoid ambiguity with other Wasm-related nginx.conf directives, the prefix for Wasm `shm_kv` nginx.conf directives was changed from `nginx_wasm_shm_` to `nginx_wasm_shm_kv_` + [#11919](https://github.com/Kong/kong/issues/11919) + [KAG-2355](https://konghq.atlassian.net/browse/KAG-2355) +#### Plugin + +- **azure-functions**: azure-functions plugin now eliminates upstream/request URI and only use `routeprefix` configuration field to construct request path when requesting Azure API + [#11850](https://github.com/Kong/kong/issues/11850) + [KAG-2841](https://konghq.atlassian.net/browse/KAG-2841) + +### Deprecations +#### Plugin + +- **ACME**: Standardize redis configuration across plugins. The redis configuration right now follows common schema that is shared across other plugins. + [#12300](https://github.com/Kong/kong/issues/12300) + [KAG-3388](https://konghq.atlassian.net/browse/KAG-3388) + +- **Rate Limiting**: Standardize redis configuration across plugins. The redis configuration right now follows common schema that is shared across other plugins. + [#12301](https://github.com/Kong/kong/issues/12301) + [KAG-3388](https://konghq.atlassian.net/browse/KAG-3388) + +- **Response-RateLimiting**: Standardize redis configuration across plugins. The redis configuration right now follows common schema that is shared across other plugins. 
+ [#12301](https://github.com/Kong/kong/issues/12301) + [KAG-3388](https://konghq.atlassian.net/browse/KAG-3388) + +### Dependencies +#### Core + +- Bumped atc-router from 1.2.0 to 1.6.0 + [#12231](https://github.com/Kong/kong/issues/12231) + [KAG-3403](https://konghq.atlassian.net/browse/KAG-3403) + +- Bumped kong-lapis from 1.14.0.3 to 1.16.0.1 + [#12064](https://github.com/Kong/kong/issues/12064) + + +- Bumped LPEG from 1.0.2 to 1.1.0 + [#11955](https://github.com/Kong/kong/issues/11955) + [UTF-8](https://konghq.atlassian.net/browse/UTF-8) + +- Bumped lua-messagepack from 0.5.2 to 0.5.3 + [#11956](https://github.com/Kong/kong/issues/11956) + + +- Bumped lua-messagepack from 0.5.3 to 0.5.4 + [#12076](https://github.com/Kong/kong/issues/12076) + + +- Bumped lua-resty-aws from 1.3.5 to 1.3.6 + [#12439](https://github.com/Kong/kong/issues/12439) + + +- Bumped lua-resty-healthcheck from 3.0.0 to 3.0.1 + [#12237](https://github.com/Kong/kong/issues/12237) + [FTI-5478](https://konghq.atlassian.net/browse/FTI-5478) + +- Bumped lua-resty-lmdb from 1.3.0 to 1.4.1 + [#12026](https://github.com/Kong/kong/issues/12026) + [KAG-3093](https://konghq.atlassian.net/browse/KAG-3093) + +- Bumped lua-resty-timer-ng from 0.2.5 to 0.2.6 + [#12275](https://github.com/Kong/kong/issues/12275) + [KAG-2932](https://konghq.atlassian.net/browse/KAG-2932) [KAG-3452](https://konghq.atlassian.net/browse/KAG-3452) + +- Bumped OpenResty from 1.21.4.2 to 1.25.3.1 + [#12327](https://github.com/Kong/kong/issues/12327) + [KAG-3515](https://konghq.atlassian.net/browse/KAG-3515) [KAG-3570](https://konghq.atlassian.net/browse/KAG-3570) [KAG-3571](https://konghq.atlassian.net/browse/KAG-3571) [JIT-2](https://konghq.atlassian.net/browse/JIT-2) + +- Bumped OpenSSL from 3.1.4 to 3.2.1 + [#12264](https://github.com/Kong/kong/issues/12264) + [KAG-3459](https://konghq.atlassian.net/browse/KAG-3459) + +- Bump resty-openssl from 0.8.25 to 1.2.0 + [#12265](https://github.com/Kong/kong/issues/12265) + + +- Bumped ngx_brotli to master branch, and disabled it on rhel7 rhel9-arm64 and amazonlinux-2023-arm64 due to toolchain issues + [#12444](https://github.com/Kong/kong/issues/12444) + [FTI-5706](https://konghq.atlassian.net/browse/FTI-5706) + +- Bumped lua-resty-healthcheck from 1.6.3 to 3.0.0 + [#11834](https://github.com/Kong/kong/issues/11834) + [KAG-2704](https://konghq.atlassian.net/browse/KAG-2704) +#### Default + +- Bump `ngx_wasm_module` to `a7087a37f0d423707366a694630f1e09f4c21728` + [#12011](https://github.com/Kong/kong/issues/12011) + + +- Bump `Wasmtime` version to `14.0.3` + [#12011](https://github.com/Kong/kong/issues/12011) + + +### Features +#### Configuration + +- display a warning message when Kong Manager is enabled but the Admin API is not enabled + [#12071](https://github.com/Kong/kong/issues/12071) + [KAG-3158](https://konghq.atlassian.net/browse/KAG-3158) + +- add DHE-RSA-CHACHA20-POLY1305 cipher to the intermediate configuration + [#12133](https://github.com/Kong/kong/issues/12133) + [KAG-3257](https://konghq.atlassian.net/browse/KAG-3257) + +- The default value of `dns_no_sync` option has been changed to `off` + [#11869](https://github.com/Kong/kong/issues/11869) + [FTI-5348](https://konghq.atlassian.net/browse/FTI-5348) + +- Allow to inject Nginx directives into Kong's proxy location block + [#11623](https://github.com/Kong/kong/issues/11623) + + +- Validate LMDB cache by Kong's version (major + minor), +wiping the content if tag mismatch to avoid compatibility issues +during minor version upgrade. 
+ [#12026](https://github.com/Kong/kong/issues/12026) + [KAG-3093](https://konghq.atlassian.net/browse/KAG-3093) +#### Core + +- Adds telemetry collection for AI Proxy, AI Request Transformer, and AI Response Transformer, pertaining to model and provider usage. + [#12495](https://github.com/Kong/kong/issues/12495) + + +- add ngx_brotli module to kong prebuild nginx + [#12367](https://github.com/Kong/kong/issues/12367) + [KAG-2477](https://konghq.atlassian.net/browse/KAG-2477) + +- Allow primary key passed as a full entity to DAO functions. + [#11695](https://github.com/Kong/kong/issues/11695) + + +- Build deb packages for Debian 12. The debian variant of kong docker image is built using Debian 12 now. + [#12218](https://github.com/Kong/kong/issues/12218) + [KAG-3015](https://konghq.atlassian.net/browse/KAG-3015) + +- The expressions route now supports the `!` (not) operator, which allows creating routes like +`!(http.path =^ "/a")` and `!(http.path == "/a" || http.path == "/b")` + [#12419](https://github.com/Kong/kong/issues/12419) + [KAG-3605](https://konghq.atlassian.net/browse/KAG-3605) + +- Add `source` property to log serializer, indicating the response is generated by `kong` or `upstream`. + [#12052](https://github.com/Kong/kong/issues/12052) + [FTI-5522](https://konghq.atlassian.net/browse/FTI-5522) + +- Ensure Kong-owned directories are cleaned up after an uninstall using the system's package manager. + [#12162](https://github.com/Kong/kong/issues/12162) + [FTI-5553](https://konghq.atlassian.net/browse/FTI-5553) + +- Support `http.path.segments.len` and `http.path.segments.*` fields in the expressions router +which allows matching incoming (normalized) request path by individual segment or ranges of segments, +plus checking the total number of segments. + [#12283](https://github.com/Kong/kong/issues/12283) + [KAG-3351](https://konghq.atlassian.net/browse/KAG-3351) + +- `net.src.*` and `net.dst.*` match fields are now accessible in HTTP routes defined using expressions. + [#11950](https://github.com/Kong/kong/issues/11950) + [KAG-2963](https://konghq.atlassian.net/browse/KAG-2963) [KAG-3032](https://konghq.atlassian.net/browse/KAG-3032) + +- Extend support for getting and setting Gateway values via proxy-wasm properties in the `kong.*` namespace. + [#11856](https://github.com/Kong/kong/issues/11856) + +#### PDK + +- Increase the precision of JSON number encoding from 14 to 16 decimals + [#12019](https://github.com/Kong/kong/issues/12019) + [FTI-5515](https://konghq.atlassian.net/browse/FTI-5515) +#### Plugin + +- Introduced the new **AI Prompt Decorator** plugin that enables prepending and appending llm/v1/chat messages onto consumer LLM requests, for prompt tuning. + [#12336](https://github.com/Kong/kong/issues/12336) + + +- Introduced the new **AI Prompt Guard** which can allow and/or block LLM requests based on pattern matching. + [#12427](https://github.com/Kong/kong/issues/12427) + + +- Introduced the new **AI Prompt Template** which can offer consumers and array of LLM prompt templates, with variable substitutions. + [#12340](https://github.com/Kong/kong/issues/12340) + + +- Introduced the new **AI Proxy** plugin that enables simplified integration with various AI provider Large Language Models. + [#12323](https://github.com/Kong/kong/issues/12323) + + +- Introduced the new **AI Request Transformer** plugin that enables passing mid-flight consumer requests to an LLM for transformation or sanitization. 
+ [#12426](https://github.com/Kong/kong/issues/12426) + + +- Introduced the new **AI Response Transformer** plugin that enables passing mid-flight upstream responses to an LLM for transformation or sanitization. + [#12426](https://github.com/Kong/kong/issues/12426) + + +- Tracing Sampling Rate can now be set via the `config.sampling_rate` property of the OpenTelemetry plugin instead of it just being a global setting for the gateway. + [#12054](https://github.com/Kong/kong/issues/12054) + [KAG-3126](https://konghq.atlassian.net/browse/KAG-3126) +#### Admin API + +- add gateway edition to the root endpoint of the admin api + [#12097](https://github.com/Kong/kong/issues/12097) + [FTI-5557](https://konghq.atlassian.net/browse/FTI-5557) + +- Enable `status_listen` on `127.0.0.1:8007` by default + [#12304](https://github.com/Kong/kong/issues/12304) + [KAG-3359](https://konghq.atlassian.net/browse/KAG-3359) +#### Clustering + +- **Clustering**: Expose data plane certificate expiry date on the control plane API. + [#11921](https://github.com/Kong/kong/issues/11921) + [FTI-5530](https://konghq.atlassian.net/browse/FTI-5530) + +### Fixes +#### Configuration + +- fix error data loss caused by weakly typed of function in declarative_config_flattened function + [#12167](https://github.com/Kong/kong/issues/12167) + [FTI-5584](https://konghq.atlassian.net/browse/FTI-5584) + +- respect custom `proxy_access_log` + [#12073](https://github.com/Kong/kong/issues/12073) + [FTI-5580](https://konghq.atlassian.net/browse/FTI-5580) +#### Core + +- prevent ca to be deleted when it's still referenced by other entities and invalidate the related ca store caches when a ca cert is updated. + [#11789](https://github.com/Kong/kong/issues/11789) + [FTI-2060](https://konghq.atlassian.net/browse/FTI-2060) + +- Now cookie names are validated against RFC 6265, which allows more characters than the previous validation. + [#11881](https://github.com/Kong/kong/issues/11881) + + +- Remove nulls only if the schema has transformations definitions. +Improve performance as most schemas does not define transformations. + [#12284](https://github.com/Kong/kong/issues/12284) + [FTI-5260](https://konghq.atlassian.net/browse/FTI-5260) + +- Fix a bug that the error_handler can not provide the meaningful response body when the internal error code 494 is triggered. + [#12114](https://github.com/Kong/kong/issues/12114) + [FTI-5374](https://konghq.atlassian.net/browse/FTI-5374) + +- Header value matching (`http.headers.*`) in `expressions` router flavor are now case sensitive. +This change does not affect on `traditional_compatible` mode +where header value match are always performed ignoring the case. + [#11905](https://github.com/Kong/kong/issues/11905) + [KAG-2905](https://konghq.atlassian.net/browse/KAG-2905) + +- print error message correctly when plugin fails + [#11800](https://github.com/Kong/kong/issues/11800) + [KAG-2844](https://konghq.atlassian.net/browse/KAG-2844) + +- fix ldoc intermittent failure caused by LuaJIT error. + [#11983](https://github.com/Kong/kong/issues/11983) + [KAG-1761](https://konghq.atlassian.net/browse/KAG-1761) + +- use NGX_WASM_MODULE_BRANCH environment variable to set ngx_wasm_module repository branch when building Kong. 
+ [#12241](https://github.com/Kong/kong/issues/12241) + [KAG-3396](https://konghq.atlassian.net/browse/KAG-3396) + +- Eliminate asynchronous timer in syncQuery() to prevent hang risk + [#11900](https://github.com/Kong/kong/issues/11900) + [KAG-2913](https://konghq.atlassian.net/browse/KAG-2913) [FTI-5348](https://konghq.atlassian.net/browse/FTI-5348) + +- **tracing:** Fixed an issue where a DNS query failure would cause a tracing failure. + [#11935](https://github.com/Kong/kong/issues/11935) + [FTI-5544](https://konghq.atlassian.net/browse/FTI-5544) + +- Expressions route in `http` and `stream` subsystem now have stricter validation. +Previously they share the same validation schema which means admin can configure expressions +route using fields like `http.path` even for stream routes. This is no longer allowed. + [#11914](https://github.com/Kong/kong/issues/11914) + [KAG-2961](https://konghq.atlassian.net/browse/KAG-2961) + +- **Tracing**: dns spans are now correctly generated for upstream dns queries (in addition to cosocket ones) + [#11996](https://github.com/Kong/kong/issues/11996) + [KAG-3057](https://konghq.atlassian.net/browse/KAG-3057) + +- Validate private and public key for `keys` entity to ensure they match each other. + [#11923](https://github.com/Kong/kong/issues/11923) + [KAG-390](https://konghq.atlassian.net/browse/KAG-390) + +- **proxy-wasm**: Fixed "previous plan already attached" error thrown when a filter triggers re-entrancy of the access handler. + [#12452](https://github.com/Kong/kong/issues/12452) + [KAG-3603](https://konghq.atlassian.net/browse/KAG-3603) +#### PDK + +- response.set_header support header argument with table array of string + [#12164](https://github.com/Kong/kong/issues/12164) + [FTI-5585](https://konghq.atlassian.net/browse/FTI-5585) + +- Fix an issue that when using kong.response.exit, the Transfer-Encoding header set by user is not removed + [#11936](https://github.com/Kong/kong/issues/11936) + [FTI-5028](https://konghq.atlassian.net/browse/FTI-5028) + +- **Plugin Server**: fix an issue where every request causes a new plugin instance to be created + [#12020](https://github.com/Kong/kong/issues/12020) + [KAG-2969](https://konghq.atlassian.net/browse/KAG-2969) +#### Plugin + +- Add missing WWW-Authenticate headers to 401 response in basic auth plugin. + [#11795](https://github.com/Kong/kong/issues/11795) + [KAG-321](https://konghq.atlassian.net/browse/KAG-321) + +- Enhance error responses for authentication failures in the Admin API + [#12456](https://github.com/Kong/kong/issues/12456) + [SEC-912](https://konghq.atlassian.net/browse/SEC-912) [KAG-1672](https://konghq.atlassian.net/browse/KAG-1672) + +- Expose metrics for serviceless routes + [#11781](https://github.com/Kong/kong/issues/11781) + [FTI-5065](https://konghq.atlassian.net/browse/FTI-5065) + +- **Rate Limiting**: fix to provide better accuracy in counters when sync_rate is used with the redis policy. + [#11859](https://github.com/Kong/kong/issues/11859) + [KAG-2906](https://konghq.atlassian.net/browse/KAG-2906) + +- **Rate Limiting**: fix an issuer where all counters are synced to the same DB at the same rate. + [#12003](https://github.com/Kong/kong/issues/12003) + [KAG-2904](https://konghq.atlassian.net/browse/KAG-2904) + +- **Datadog**: Fix a bug that datadog plugin is not triggered for serviceless routes. In this fix, datadog plugin is always triggered, and the value of tag `name`(service_name) is set as an empty value. 
+ [#12068](https://github.com/Kong/kong/issues/12068) + [FTI-5576](https://konghq.atlassian.net/browse/FTI-5576) +#### Clustering + +- Fix a bug causing data-plane status updates to fail when an empty PING frame is received from a data-plane + [#11917](https://github.com/Kong/kong/issues/11917) + [KAG-2967](https://konghq.atlassian.net/browse/KAG-2967) +## Kong-Manager + + + + + + +### Features +#### Default + +- Added a JSON/YAML format preview for all entity forms. + [#157](https://github.com/Kong/kong-manager/issues/157) + + +- Adopted resigned basic components for better UI/UX. + [#131](https://github.com/Kong/kong-manager/issues/131) [#166](https://github.com/Kong/kong-manager/issues/166) + + +- Kong Manager and Konnect now share the same UI for plugin selection page and plugin form page. + [#143](https://github.com/Kong/kong-manager/issues/143) [#147](https://github.com/Kong/kong-manager/issues/147) + + +### Fixes +#### Default + +- Standardized notification text format. + [#140](https://github.com/Kong/kong-manager/issues/140) + diff --git a/changelog/3.6.0/kong-manager/entity_form_preview.yml b/changelog/3.6.0/kong-manager/entity_form_preview.yml new file mode 100644 index 00000000000..f9a78c5cc65 --- /dev/null +++ b/changelog/3.6.0/kong-manager/entity_form_preview.yml @@ -0,0 +1,3 @@ +message: Added a JSON/YAML format preview for all entity forms. +type: feature +githubs: [157] \ No newline at end of file diff --git a/changelog/3.6.0/kong-manager/redesigned_basic_components.yml b/changelog/3.6.0/kong-manager/redesigned_basic_components.yml new file mode 100644 index 00000000000..60ed4eb675d --- /dev/null +++ b/changelog/3.6.0/kong-manager/redesigned_basic_components.yml @@ -0,0 +1,3 @@ +message: Adopted resigned basic components for better UI/UX. +type: feature +githubs: [131, 166] \ No newline at end of file diff --git a/changelog/3.6.0/kong-manager/standardized_notification_format.yml b/changelog/3.6.0/kong-manager/standardized_notification_format.yml new file mode 100644 index 00000000000..5352fc41b99 --- /dev/null +++ b/changelog/3.6.0/kong-manager/standardized_notification_format.yml @@ -0,0 +1,3 @@ +message: Standardized notification text format. +type: bugfix +githubs: [140] \ No newline at end of file diff --git a/changelog/3.6.0/kong-manager/unified_plugin_pages.yml b/changelog/3.6.0/kong-manager/unified_plugin_pages.yml new file mode 100644 index 00000000000..3ab3c78a4a1 --- /dev/null +++ b/changelog/3.6.0/kong-manager/unified_plugin_pages.yml @@ -0,0 +1,3 @@ +message: Kong Manager and Konnect now share the same UI for plugin selection page and plugin form page. 
+type: feature +githubs: [143, 147] \ No newline at end of file diff --git a/changelog/3.6.0/kong/.gitkeep b/changelog/3.6.0/kong/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/unreleased/kong/add-ai-prompt-decorator-plugin.yml b/changelog/3.6.0/kong/add-ai-prompt-decorator-plugin.yml similarity index 100% rename from changelog/unreleased/kong/add-ai-prompt-decorator-plugin.yml rename to changelog/3.6.0/kong/add-ai-prompt-decorator-plugin.yml diff --git a/changelog/unreleased/kong/add-ai-prompt-guard-plugin.yml b/changelog/3.6.0/kong/add-ai-prompt-guard-plugin.yml similarity index 100% rename from changelog/unreleased/kong/add-ai-prompt-guard-plugin.yml rename to changelog/3.6.0/kong/add-ai-prompt-guard-plugin.yml diff --git a/changelog/unreleased/kong/add-ai-prompt-template-plugin.yml b/changelog/3.6.0/kong/add-ai-prompt-template-plugin.yml similarity index 100% rename from changelog/unreleased/kong/add-ai-prompt-template-plugin.yml rename to changelog/3.6.0/kong/add-ai-prompt-template-plugin.yml diff --git a/changelog/unreleased/kong/add-ai-proxy-plugin.yml b/changelog/3.6.0/kong/add-ai-proxy-plugin.yml similarity index 100% rename from changelog/unreleased/kong/add-ai-proxy-plugin.yml rename to changelog/3.6.0/kong/add-ai-proxy-plugin.yml diff --git a/changelog/unreleased/kong/add-ai-proxy-telemetry.yml b/changelog/3.6.0/kong/add-ai-proxy-telemetry.yml similarity index 100% rename from changelog/unreleased/kong/add-ai-proxy-telemetry.yml rename to changelog/3.6.0/kong/add-ai-proxy-telemetry.yml diff --git a/changelog/unreleased/kong/add-ai-request-transformer-plugin.yml b/changelog/3.6.0/kong/add-ai-request-transformer-plugin.yml similarity index 100% rename from changelog/unreleased/kong/add-ai-request-transformer-plugin.yml rename to changelog/3.6.0/kong/add-ai-request-transformer-plugin.yml diff --git a/changelog/unreleased/kong/add-ai-response-transformer-plugin.yml b/changelog/3.6.0/kong/add-ai-response-transformer-plugin.yml similarity index 100% rename from changelog/unreleased/kong/add-ai-response-transformer-plugin.yml rename to changelog/3.6.0/kong/add-ai-response-transformer-plugin.yml diff --git a/changelog/unreleased/kong/add-gateway-edition-to-root-endpoint-admin-api.yml b/changelog/3.6.0/kong/add-gateway-edition-to-root-endpoint-admin-api.yml similarity index 100% rename from changelog/unreleased/kong/add-gateway-edition-to-root-endpoint-admin-api.yml rename to changelog/3.6.0/kong/add-gateway-edition-to-root-endpoint-admin-api.yml diff --git a/changelog/unreleased/kong/add_ngx_brotli_module.yml b/changelog/3.6.0/kong/add_ngx_brotli_module.yml similarity index 100% rename from changelog/unreleased/kong/add_ngx_brotli_module.yml rename to changelog/3.6.0/kong/add_ngx_brotli_module.yml diff --git a/changelog/unreleased/kong/atc_reuse_context.yml b/changelog/3.6.0/kong/atc_reuse_context.yml similarity index 100% rename from changelog/unreleased/kong/atc_reuse_context.yml rename to changelog/3.6.0/kong/atc_reuse_context.yml diff --git a/changelog/unreleased/kong/basic_www_authenticate.yml b/changelog/3.6.0/kong/basic_www_authenticate.yml similarity index 100% rename from changelog/unreleased/kong/basic_www_authenticate.yml rename to changelog/3.6.0/kong/basic_www_authenticate.yml diff --git a/changelog/unreleased/kong/bump-atc-router.yml b/changelog/3.6.0/kong/bump-atc-router.yml similarity index 100% rename from changelog/unreleased/kong/bump-atc-router.yml rename to changelog/3.6.0/kong/bump-atc-router.yml diff --git 
a/changelog/unreleased/kong/bump-cocurrency-limit-of-timer-ng.yml b/changelog/3.6.0/kong/bump-cocurrency-limit-of-timer-ng.yml similarity index 100% rename from changelog/unreleased/kong/bump-cocurrency-limit-of-timer-ng.yml rename to changelog/3.6.0/kong/bump-cocurrency-limit-of-timer-ng.yml diff --git a/changelog/unreleased/kong/bump-lapis-1.16.0.1.yml b/changelog/3.6.0/kong/bump-lapis-1.16.0.1.yml similarity index 100% rename from changelog/unreleased/kong/bump-lapis-1.16.0.1.yml rename to changelog/3.6.0/kong/bump-lapis-1.16.0.1.yml diff --git a/changelog/unreleased/kong/bump-lpeg-1.1.0.yml b/changelog/3.6.0/kong/bump-lpeg-1.1.0.yml similarity index 100% rename from changelog/unreleased/kong/bump-lpeg-1.1.0.yml rename to changelog/3.6.0/kong/bump-lpeg-1.1.0.yml diff --git a/changelog/unreleased/kong/bump-lua-messagepack-0.5.3.yml b/changelog/3.6.0/kong/bump-lua-messagepack-0.5.3.yml similarity index 100% rename from changelog/unreleased/kong/bump-lua-messagepack-0.5.3.yml rename to changelog/3.6.0/kong/bump-lua-messagepack-0.5.3.yml diff --git a/changelog/unreleased/kong/bump-lua-messagepack-0.5.4.yml b/changelog/3.6.0/kong/bump-lua-messagepack-0.5.4.yml similarity index 100% rename from changelog/unreleased/kong/bump-lua-messagepack-0.5.4.yml rename to changelog/3.6.0/kong/bump-lua-messagepack-0.5.4.yml diff --git a/changelog/unreleased/kong/bump-lua-resty-aws-1.3.6.yml b/changelog/3.6.0/kong/bump-lua-resty-aws-1.3.6.yml similarity index 100% rename from changelog/unreleased/kong/bump-lua-resty-aws-1.3.6.yml rename to changelog/3.6.0/kong/bump-lua-resty-aws-1.3.6.yml diff --git a/changelog/unreleased/kong/bump-lua-resty-healthcheck-3.0.1.yml b/changelog/3.6.0/kong/bump-lua-resty-healthcheck-3.0.1.yml similarity index 100% rename from changelog/unreleased/kong/bump-lua-resty-healthcheck-3.0.1.yml rename to changelog/3.6.0/kong/bump-lua-resty-healthcheck-3.0.1.yml diff --git a/changelog/unreleased/kong/bump-lua-resty-lmdb-1.4.1.yml b/changelog/3.6.0/kong/bump-lua-resty-lmdb-1.4.1.yml similarity index 100% rename from changelog/unreleased/kong/bump-lua-resty-lmdb-1.4.1.yml rename to changelog/3.6.0/kong/bump-lua-resty-lmdb-1.4.1.yml diff --git a/changelog/unreleased/kong/bump-lua-resty-timer-ng-to-0.2.6.yml b/changelog/3.6.0/kong/bump-lua-resty-timer-ng-to-0.2.6.yml similarity index 100% rename from changelog/unreleased/kong/bump-lua-resty-timer-ng-to-0.2.6.yml rename to changelog/3.6.0/kong/bump-lua-resty-timer-ng-to-0.2.6.yml diff --git a/changelog/unreleased/kong/bump-ngx-wasm-module.yml b/changelog/3.6.0/kong/bump-ngx-wasm-module.yml similarity index 100% rename from changelog/unreleased/kong/bump-ngx-wasm-module.yml rename to changelog/3.6.0/kong/bump-ngx-wasm-module.yml diff --git a/changelog/unreleased/kong/bump-openresty.yml b/changelog/3.6.0/kong/bump-openresty.yml similarity index 100% rename from changelog/unreleased/kong/bump-openresty.yml rename to changelog/3.6.0/kong/bump-openresty.yml diff --git a/changelog/unreleased/kong/bump-openssl.yml b/changelog/3.6.0/kong/bump-openssl.yml similarity index 100% rename from changelog/unreleased/kong/bump-openssl.yml rename to changelog/3.6.0/kong/bump-openssl.yml diff --git a/changelog/unreleased/kong/bump-resty-openssl.yml b/changelog/3.6.0/kong/bump-resty-openssl.yml similarity index 100% rename from changelog/unreleased/kong/bump-resty-openssl.yml rename to changelog/3.6.0/kong/bump-resty-openssl.yml diff --git a/changelog/unreleased/kong/bump-wasmtime.yml b/changelog/3.6.0/kong/bump-wasmtime.yml similarity index 100% rename from 
changelog/unreleased/kong/bump-wasmtime.yml rename to changelog/3.6.0/kong/bump-wasmtime.yml diff --git a/changelog/unreleased/kong/bump_dns_stale_ttl.yml b/changelog/3.6.0/kong/bump_dns_stale_ttl.yml similarity index 100% rename from changelog/unreleased/kong/bump_dns_stale_ttl.yml rename to changelog/3.6.0/kong/bump_dns_stale_ttl.yml diff --git a/changelog/unreleased/kong/bump_ngx_brotli.yml b/changelog/3.6.0/kong/bump_ngx_brotli.yml similarity index 100% rename from changelog/unreleased/kong/bump_ngx_brotli.yml rename to changelog/3.6.0/kong/bump_ngx_brotli.yml diff --git a/changelog/unreleased/kong/ca_certificates_reference_check.yml b/changelog/3.6.0/kong/ca_certificates_reference_check.yml similarity index 100% rename from changelog/unreleased/kong/ca_certificates_reference_check.yml rename to changelog/3.6.0/kong/ca_certificates_reference_check.yml diff --git a/changelog/unreleased/kong/clustering-empty-data-plane-hash-fix.yml b/changelog/3.6.0/kong/clustering-empty-data-plane-hash-fix.yml similarity index 100% rename from changelog/unreleased/kong/clustering-empty-data-plane-hash-fix.yml rename to changelog/3.6.0/kong/clustering-empty-data-plane-hash-fix.yml diff --git a/changelog/unreleased/kong/cookie-name-validator.yml b/changelog/3.6.0/kong/cookie-name-validator.yml similarity index 100% rename from changelog/unreleased/kong/cookie-name-validator.yml rename to changelog/3.6.0/kong/cookie-name-validator.yml diff --git a/changelog/unreleased/kong/cp-expose-dp-cert-details.yml b/changelog/3.6.0/kong/cp-expose-dp-cert-details.yml similarity index 100% rename from changelog/unreleased/kong/cp-expose-dp-cert-details.yml rename to changelog/3.6.0/kong/cp-expose-dp-cert-details.yml diff --git a/changelog/unreleased/kong/dao-pk-as-entity.yml b/changelog/3.6.0/kong/dao-pk-as-entity.yml similarity index 100% rename from changelog/unreleased/kong/dao-pk-as-entity.yml rename to changelog/3.6.0/kong/dao-pk-as-entity.yml diff --git a/changelog/unreleased/kong/debian-12-support.yml b/changelog/3.6.0/kong/debian-12-support.yml similarity index 100% rename from changelog/unreleased/kong/debian-12-support.yml rename to changelog/3.6.0/kong/debian-12-support.yml diff --git a/changelog/unreleased/kong/declarative_config_fix.yml b/changelog/3.6.0/kong/declarative_config_fix.yml similarity index 100% rename from changelog/unreleased/kong/declarative_config_fix.yml rename to changelog/3.6.0/kong/declarative_config_fix.yml diff --git a/changelog/unreleased/kong/default_status_port.yml b/changelog/3.6.0/kong/default_status_port.yml similarity index 100% rename from changelog/unreleased/kong/default_status_port.yml rename to changelog/3.6.0/kong/default_status_port.yml diff --git a/changelog/unreleased/kong/deps_bump_lua_resty_healthcheck.yml b/changelog/3.6.0/kong/deps_bump_lua_resty_healthcheck.yml similarity index 100% rename from changelog/unreleased/kong/deps_bump_lua_resty_healthcheck.yml rename to changelog/3.6.0/kong/deps_bump_lua_resty_healthcheck.yml diff --git a/changelog/unreleased/kong/display-warning-message-for-km-misconfig.yml b/changelog/3.6.0/kong/display-warning-message-for-km-misconfig.yml similarity index 100% rename from changelog/unreleased/kong/display-warning-message-for-km-misconfig.yml rename to changelog/3.6.0/kong/display-warning-message-for-km-misconfig.yml diff --git a/changelog/unreleased/kong/enhance_admin_api_auth_error_response.yml b/changelog/3.6.0/kong/enhance_admin_api_auth_error_response.yml similarity index 100% rename from 
changelog/unreleased/kong/enhance_admin_api_auth_error_response.yml rename to changelog/3.6.0/kong/enhance_admin_api_auth_error_response.yml diff --git a/changelog/unreleased/kong/error_handler_494.yml b/changelog/3.6.0/kong/error_handler_494.yml similarity index 100% rename from changelog/unreleased/kong/error_handler_494.yml rename to changelog/3.6.0/kong/error_handler_494.yml diff --git a/changelog/unreleased/kong/expression_http_headers_sensitive.yml b/changelog/3.6.0/kong/expression_http_headers_sensitive.yml similarity index 100% rename from changelog/unreleased/kong/expression_http_headers_sensitive.yml rename to changelog/3.6.0/kong/expression_http_headers_sensitive.yml diff --git a/changelog/unreleased/kong/expressions_not_operator.yml b/changelog/3.6.0/kong/expressions_not_operator.yml similarity index 100% rename from changelog/unreleased/kong/expressions_not_operator.yml rename to changelog/3.6.0/kong/expressions_not_operator.yml diff --git a/changelog/unreleased/kong/feat-add-cipher-to-the-intermediate.yml b/changelog/3.6.0/kong/feat-add-cipher-to-the-intermediate.yml similarity index 100% rename from changelog/unreleased/kong/feat-add-cipher-to-the-intermediate.yml rename to changelog/3.6.0/kong/feat-add-cipher-to-the-intermediate.yml diff --git a/changelog/unreleased/kong/fix-declarative-config-flattened-data-loss.yml b/changelog/3.6.0/kong/fix-declarative-config-flattened-data-loss.yml similarity index 100% rename from changelog/unreleased/kong/fix-declarative-config-flattened-data-loss.yml rename to changelog/3.6.0/kong/fix-declarative-config-flattened-data-loss.yml diff --git a/changelog/unreleased/kong/fix-error-message-print.yml b/changelog/3.6.0/kong/fix-error-message-print.yml similarity index 100% rename from changelog/unreleased/kong/fix-error-message-print.yml rename to changelog/3.6.0/kong/fix-error-message-print.yml diff --git a/changelog/unreleased/kong/fix-ldoc-intermittent-fail.yml b/changelog/3.6.0/kong/fix-ldoc-intermittent-fail.yml similarity index 100% rename from changelog/unreleased/kong/fix-ldoc-intermittent-fail.yml rename to changelog/3.6.0/kong/fix-ldoc-intermittent-fail.yml diff --git a/changelog/unreleased/kong/fix-pdk-response-set-header-with-table.yml b/changelog/3.6.0/kong/fix-pdk-response-set-header-with-table.yml similarity index 100% rename from changelog/unreleased/kong/fix-pdk-response-set-header-with-table.yml rename to changelog/3.6.0/kong/fix-pdk-response-set-header-with-table.yml diff --git a/changelog/unreleased/kong/fix-upstream-uri-azure-function-plugin.yml b/changelog/3.6.0/kong/fix-upstream-uri-azure-function-plugin.yml similarity index 100% rename from changelog/unreleased/kong/fix-upstream-uri-azure-function-plugin.yml rename to changelog/3.6.0/kong/fix-upstream-uri-azure-function-plugin.yml diff --git a/changelog/unreleased/kong/fix-wasm-module-branch.yml b/changelog/3.6.0/kong/fix-wasm-module-branch.yml similarity index 100% rename from changelog/unreleased/kong/fix-wasm-module-branch.yml rename to changelog/3.6.0/kong/fix-wasm-module-branch.yml diff --git a/changelog/unreleased/kong/fix_dns_blocking.yml b/changelog/3.6.0/kong/fix_dns_blocking.yml similarity index 100% rename from changelog/unreleased/kong/fix_dns_blocking.yml rename to changelog/3.6.0/kong/fix_dns_blocking.yml diff --git a/changelog/unreleased/kong/fix_dns_disable_dns_no_sync.yml b/changelog/3.6.0/kong/fix_dns_disable_dns_no_sync.yml similarity index 100% rename from changelog/unreleased/kong/fix_dns_disable_dns_no_sync.yml rename to 
changelog/3.6.0/kong/fix_dns_disable_dns_no_sync.yml diff --git a/changelog/unreleased/kong/fix_dns_instrument_error_handling.yml b/changelog/3.6.0/kong/fix_dns_instrument_error_handling.yml similarity index 100% rename from changelog/unreleased/kong/fix_dns_instrument_error_handling.yml rename to changelog/3.6.0/kong/fix_dns_instrument_error_handling.yml diff --git a/changelog/unreleased/kong/inject-nginx-directives-location.yml b/changelog/3.6.0/kong/inject-nginx-directives-location.yml similarity index 100% rename from changelog/unreleased/kong/inject-nginx-directives-location.yml rename to changelog/3.6.0/kong/inject-nginx-directives-location.yml diff --git a/changelog/unreleased/kong/introduce_lmdb_validation_tag.yml b/changelog/3.6.0/kong/introduce_lmdb_validation_tag.yml similarity index 100% rename from changelog/unreleased/kong/introduce_lmdb_validation_tag.yml rename to changelog/3.6.0/kong/introduce_lmdb_validation_tag.yml diff --git a/changelog/unreleased/kong/log-serializer-source-property.yml b/changelog/3.6.0/kong/log-serializer-source-property.yml similarity index 100% rename from changelog/unreleased/kong/log-serializer-source-property.yml rename to changelog/3.6.0/kong/log-serializer-source-property.yml diff --git a/changelog/unreleased/kong/optimize_keepalive_parameters.yml b/changelog/3.6.0/kong/optimize_keepalive_parameters.yml similarity index 100% rename from changelog/unreleased/kong/optimize_keepalive_parameters.yml rename to changelog/3.6.0/kong/optimize_keepalive_parameters.yml diff --git a/changelog/unreleased/kong/pdk-json-encoding-numbers-precision.yml b/changelog/3.6.0/kong/pdk-json-encoding-numbers-precision.yml similarity index 100% rename from changelog/unreleased/kong/pdk-json-encoding-numbers-precision.yml rename to changelog/3.6.0/kong/pdk-json-encoding-numbers-precision.yml diff --git a/changelog/unreleased/kong/pdk-response-send-remove-transfer-encoding.yml b/changelog/3.6.0/kong/pdk-response-send-remove-transfer-encoding.yml similarity index 100% rename from changelog/unreleased/kong/pdk-response-send-remove-transfer-encoding.yml rename to changelog/3.6.0/kong/pdk-response-send-remove-transfer-encoding.yml diff --git a/changelog/unreleased/kong/perf-tracing-from-timers.yml b/changelog/3.6.0/kong/perf-tracing-from-timers.yml similarity index 100% rename from changelog/unreleased/kong/perf-tracing-from-timers.yml rename to changelog/3.6.0/kong/perf-tracing-from-timers.yml diff --git a/changelog/unreleased/kong/plugin-server-instance-leak.yml b/changelog/3.6.0/kong/plugin-server-instance-leak.yml similarity index 100% rename from changelog/unreleased/kong/plugin-server-instance-leak.yml rename to changelog/3.6.0/kong/plugin-server-instance-leak.yml diff --git a/changelog/unreleased/kong/postremove.yml b/changelog/3.6.0/kong/postremove.yml similarity index 100% rename from changelog/unreleased/kong/postremove.yml rename to changelog/3.6.0/kong/postremove.yml diff --git a/changelog/unreleased/kong/prometheus_expose_no_service_metrics.yml b/changelog/3.6.0/kong/prometheus_expose_no_service_metrics.yml similarity index 100% rename from changelog/unreleased/kong/prometheus_expose_no_service_metrics.yml rename to changelog/3.6.0/kong/prometheus_expose_no_service_metrics.yml diff --git a/changelog/unreleased/kong/rate-limiting-fix-redis-sync-rate.yml b/changelog/3.6.0/kong/rate-limiting-fix-redis-sync-rate.yml similarity index 100% rename from changelog/unreleased/kong/rate-limiting-fix-redis-sync-rate.yml rename to 
changelog/3.6.0/kong/rate-limiting-fix-redis-sync-rate.yml diff --git a/changelog/unreleased/kong/respect-custom-proxy_access_log.yml b/changelog/3.6.0/kong/respect-custom-proxy_access_log.yml similarity index 100% rename from changelog/unreleased/kong/respect-custom-proxy_access_log.yml rename to changelog/3.6.0/kong/respect-custom-proxy_access_log.yml diff --git a/changelog/unreleased/kong/rl-shared-sync-timer.yml b/changelog/3.6.0/kong/rl-shared-sync-timer.yml similarity index 100% rename from changelog/unreleased/kong/rl-shared-sync-timer.yml rename to changelog/3.6.0/kong/rl-shared-sync-timer.yml diff --git a/changelog/unreleased/kong/router-report-yield.yml b/changelog/3.6.0/kong/router-report-yield.yml similarity index 100% rename from changelog/unreleased/kong/router-report-yield.yml rename to changelog/3.6.0/kong/router-report-yield.yml diff --git a/changelog/unreleased/kong/serviceless-routes-still-trigger-datalog-plugin.yml b/changelog/3.6.0/kong/serviceless-routes-still-trigger-datalog-plugin.yml similarity index 100% rename from changelog/unreleased/kong/serviceless-routes-still-trigger-datalog-plugin.yml rename to changelog/3.6.0/kong/serviceless-routes-still-trigger-datalog-plugin.yml diff --git a/changelog/unreleased/kong/standardize-redis-conifguration-acme.yml b/changelog/3.6.0/kong/standardize-redis-conifguration-acme.yml similarity index 100% rename from changelog/unreleased/kong/standardize-redis-conifguration-acme.yml rename to changelog/3.6.0/kong/standardize-redis-conifguration-acme.yml diff --git a/changelog/unreleased/kong/standardize-redis-conifguration-rate-limiting.yml b/changelog/3.6.0/kong/standardize-redis-conifguration-rate-limiting.yml similarity index 100% rename from changelog/unreleased/kong/standardize-redis-conifguration-rate-limiting.yml rename to changelog/3.6.0/kong/standardize-redis-conifguration-rate-limiting.yml diff --git a/changelog/unreleased/kong/standardize-redis-conifguration-response-rl.yml b/changelog/3.6.0/kong/standardize-redis-conifguration-response-rl.yml similarity index 100% rename from changelog/unreleased/kong/standardize-redis-conifguration-response-rl.yml rename to changelog/3.6.0/kong/standardize-redis-conifguration-response-rl.yml diff --git a/changelog/unreleased/kong/subsystems_do_not_share_router_schemas.yml b/changelog/3.6.0/kong/subsystems_do_not_share_router_schemas.yml similarity index 100% rename from changelog/unreleased/kong/subsystems_do_not_share_router_schemas.yml rename to changelog/3.6.0/kong/subsystems_do_not_share_router_schemas.yml diff --git a/changelog/unreleased/kong/support_http_path_segments_field.yml b/changelog/3.6.0/kong/support_http_path_segments_field.yml similarity index 100% rename from changelog/unreleased/kong/support_http_path_segments_field.yml rename to changelog/3.6.0/kong/support_http_path_segments_field.yml diff --git a/changelog/unreleased/kong/support_net_src_dst_field_in_expression.yml b/changelog/3.6.0/kong/support_net_src_dst_field_in_expression.yml similarity index 100% rename from changelog/unreleased/kong/support_net_src_dst_field_in_expression.yml rename to changelog/3.6.0/kong/support_net_src_dst_field_in_expression.yml diff --git a/changelog/unreleased/kong/tracing-dns-query-patch.yml b/changelog/3.6.0/kong/tracing-dns-query-patch.yml similarity index 100% rename from changelog/unreleased/kong/tracing-dns-query-patch.yml rename to changelog/3.6.0/kong/tracing-dns-query-patch.yml diff --git a/changelog/unreleased/kong/tracing-sampling-rate-scope.yml 
b/changelog/3.6.0/kong/tracing-sampling-rate-scope.yml similarity index 100% rename from changelog/unreleased/kong/tracing-sampling-rate-scope.yml rename to changelog/3.6.0/kong/tracing-sampling-rate-scope.yml diff --git a/changelog/unreleased/kong/validate_private_key.yml b/changelog/3.6.0/kong/validate_private_key.yml similarity index 100% rename from changelog/unreleased/kong/validate_private_key.yml rename to changelog/3.6.0/kong/validate_private_key.yml diff --git a/changelog/unreleased/kong/wasm-attach.yml b/changelog/3.6.0/kong/wasm-attach.yml similarity index 100% rename from changelog/unreleased/kong/wasm-attach.yml rename to changelog/3.6.0/kong/wasm-attach.yml diff --git a/changelog/unreleased/kong/wasm-dynamic-properties.yml b/changelog/3.6.0/kong/wasm-dynamic-properties.yml similarity index 100% rename from changelog/unreleased/kong/wasm-dynamic-properties.yml rename to changelog/3.6.0/kong/wasm-dynamic-properties.yml diff --git a/changelog/unreleased/kong/wasm-injected-shm-kv.yml b/changelog/3.6.0/kong/wasm-injected-shm-kv.yml similarity index 100% rename from changelog/unreleased/kong/wasm-injected-shm-kv.yml rename to changelog/3.6.0/kong/wasm-injected-shm-kv.yml From c190632d08d2512701a95802b033bcc0a8828821 Mon Sep 17 00:00:00 2001 From: Enrique Garcia Cota Date: Fri, 2 Feb 2024 22:54:06 +0100 Subject: [PATCH 23/91] docs(changelog): expand upstream_keepalive changelog entry --- changelog/3.6.0/3.6.0.md | 2 +- changelog/3.6.0/kong/optimize_keepalive_parameters.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog/3.6.0/3.6.0.md b/changelog/3.6.0/3.6.0.md index 58f0a362c01..6525493ce62 100644 --- a/changelog/3.6.0/3.6.0.md +++ b/changelog/3.6.0/3.6.0.md @@ -17,7 +17,7 @@ [#12087](https://github.com/Kong/kong/issues/12087) [KAG-3080](https://konghq.atlassian.net/browse/KAG-3080) -- Bumped default values of `nginx_http_keepalive_requests` and `upstream_keepalive_max_requests` to `10000`. +- Bumped default values of `nginx_http_keepalive_requests` and `upstream_keepalive_max_requests` to `10000`. These changes are optimized to work better in systems with high throughput. In a low-throughput setting, these new settings may have visible effects in loadbalancing - it can take more requests to start using all the upstreams than before. [#12223](https://github.com/Kong/kong/issues/12223) [KAG-3360](https://konghq.atlassian.net/browse/KAG-3360) #### Core diff --git a/changelog/3.6.0/kong/optimize_keepalive_parameters.yml b/changelog/3.6.0/kong/optimize_keepalive_parameters.yml index 49ec8baf6d4..22725a15d11 100644 --- a/changelog/3.6.0/kong/optimize_keepalive_parameters.yml +++ b/changelog/3.6.0/kong/optimize_keepalive_parameters.yml @@ -1,3 +1,3 @@ -message: Bumped default values of `nginx_http_keepalive_requests` and `upstream_keepalive_max_requests` to `10000`. +message: Bumped default values of `nginx_http_keepalive_requests` and `upstream_keepalive_max_requests` to `10000`. These changes are optimized to work better in systems with high throughput. In a low-throughput setting, these new settings may have visible effects in loadbalancing - it can take more requests to start using all the upstreams than before. 
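The higher keepalive defaults favour throughput over per-request load-balancer rotation, so deployments that depend on quickly spreading a small number of requests across all upstream targets may want to dial the values back. A minimal sketch, assuming standard kong.conf property syntax (the property names come from the entry above; the values are only illustrative, not the previous defaults):

    # kong.conf -- illustrative lower values
    nginx_http_keepalive_requests = 1000
    upstream_keepalive_max_requests = 1000

The same properties can also be set from the environment as KONG_NGINX_HTTP_KEEPALIVE_REQUESTS and KONG_UPSTREAM_KEEPALIVE_MAX_REQUESTS.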
 type: performance
 scope: Configuration

From b584dee68a2a1fbe7c20d700203f328b3c60952e Mon Sep 17 00:00:00 2001
From: Water-Melon
Date: Mon, 5 Feb 2024 15:07:55 +0000
Subject: [PATCH 24/91] chore(changelog): breaking change for OpenSSL key width

---
 changelog/3.6.0/3.6.0.md                              | 10 ++++++++++
 .../3.6.0/kong/bump_openssl_from_3_1_4_to_3_2_0.yml   |  8 ++++++++
 2 files changed, 18 insertions(+)
 create mode 100644 changelog/3.6.0/kong/bump_openssl_from_3_1_4_to_3_2_0.yml

diff --git a/changelog/3.6.0/3.6.0.md b/changelog/3.6.0/3.6.0.md
index 6525493ce62..04224e567e6 100644
--- a/changelog/3.6.0/3.6.0.md
+++ b/changelog/3.6.0/3.6.0.md
@@ -37,6 +37,16 @@
 - **BREAKING:** To avoid ambiguity with other Wasm-related nginx.conf directives, the prefix for Wasm `shm_kv` nginx.conf directives was changed from `nginx_wasm_shm_` to `nginx_wasm_shm_kv_`
   [#11919](https://github.com/Kong/kong/issues/11919)
   [KAG-2355](https://konghq.atlassian.net/browse/KAG-2355)
+
+- In OpenSSL 3.2, the default SSL/TLS security level has been changed from 1 to 2,
+  which means the security level is now set to 112 bits of security. As a result,
+  RSA, DSA and DH keys shorter than 2048 bits and ECC keys shorter than
+  224 bits are prohibited. In addition to the level 1 exclusions, any cipher
+  suite using RC4 is also prohibited. SSL version 3 is also not allowed.
+  Compression is disabled.
+  [#7714](https://github.com/Kong/kong/issues/7714)
+  [KAG-3459](https://konghq.atlassian.net/browse/KAG-3459)
+
 #### Plugin

 - **azure-functions**: azure-functions plugin now eliminates upstream/request URI and only use `routeprefix` configuration field to construct request path when requesting Azure API
diff --git a/changelog/3.6.0/kong/bump_openssl_from_3_1_4_to_3_2_0.yml b/changelog/3.6.0/kong/bump_openssl_from_3_1_4_to_3_2_0.yml
new file mode 100644
index 00000000000..ac625d9db04
--- /dev/null
+++ b/changelog/3.6.0/kong/bump_openssl_from_3_1_4_to_3_2_0.yml
@@ -0,0 +1,8 @@
+message: >-
+  In OpenSSL 3.2, the default SSL/TLS security level has been changed from 1 to 2,
+  which means the security level is now set to 112 bits of security. As a result,
+  RSA, DSA and DH keys shorter than 2048 bits and ECC keys shorter than
+  224 bits are prohibited. In addition to the level 1 exclusions, any cipher
+  suite using RC4 is also prohibited. SSL version 3 is also not allowed.
+  Compression is disabled.
+type: breaking_change

From c76b943440b4f45be843faf70d31a5fea62126d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20Nowak?=
Date: Mon, 5 Feb 2024 15:44:57 +0100
Subject: [PATCH 25/91] fix(dao): allow shorthand fields to be in response

Shorthand fields are stripped out of the response, but we use them when we
want to rename some of the fields.
This commit adds an option `expand_shortfields` as well as some some options to shorthand_fields schema that would allow us to include them back in the schema while using the latest data KAG-3677 --- kong/db/dao/init.lua | 16 ++++++++- kong/db/dao/plugins.lua | 2 ++ kong/db/schema/init.lua | 19 +++++++++- kong/db/schema/metaschema.lua | 2 ++ kong/plugins/acme/schema.lua | 16 +++++++++ kong/plugins/rate-limiting/schema.lua | 36 +++++++++++++++++++ kong/plugins/response-ratelimiting/schema.lua | 36 +++++++++++++++++++ .../09-hybrid_mode/09-config-compat_spec.lua | 17 +++++++-- .../23-rate-limiting/05-integration_spec.lua | 11 ++++++ .../05-integration_spec.lua | 11 ++++++ .../29-acme/05-redis_storage_spec.lua | 6 ++++ 11 files changed, 167 insertions(+), 5 deletions(-) diff --git a/kong/db/dao/init.lua b/kong/db/dao/init.lua index 31f6414f65e..fdbf928bdab 100644 --- a/kong/db/dao/init.lua +++ b/kong/db/dao/init.lua @@ -1011,6 +1011,10 @@ function DAO:select(pk_or_entity, options) end local err + if options == nil or options.expand_shorthands == nil then + options = options or {} + options.expand_shorthands = true + end row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1064,6 +1068,10 @@ function DAO:page(size, offset, options) end local entities, err + if options == nil or options.expand_shorthands == nil then + options = options or {} + options.expand_shorthands = true + end entities, err, err_t = self:rows_to_entities(rows, options) if not entities then return nil, err, err_t @@ -1148,6 +1156,8 @@ function DAO:insert(entity, options) end local ws_id = row.ws_id + options = options or {} + options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1201,6 +1211,8 @@ function DAO:update(pk_or_entity, entity, options) end local ws_id = row.ws_id + options = options or {} + options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1254,6 +1266,8 @@ function DAO:upsert(pk_or_entity, entity, options) end local ws_id = row.ws_id + options = options or {} + options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1443,7 +1457,7 @@ function DAO:row_to_entity(row, options) end end - local entity, errors = self.schema:process_auto_fields(transformed_entity or row, "select", nulls) + local entity, errors = self.schema:process_auto_fields(transformed_entity or row, "select", nulls, options) if not entity then local err_t = self.errors:schema_violation(errors) return nil, tostring(err_t), err_t diff --git a/kong/db/dao/plugins.lua b/kong/db/dao/plugins.lua index 86a56fc416e..d94ff7d1cc2 100644 --- a/kong/db/dao/plugins.lua +++ b/kong/db/dao/plugins.lua @@ -89,6 +89,8 @@ end function Plugins:update(primary_key, entity, options) + options = options or {} + options.expand_shorthands = false local rbw_entity = self.super.select(self, primary_key, options) -- ignore errors if rbw_entity then entity = self.schema:merge_values(entity, rbw_entity) diff --git a/kong/db/schema/init.lua b/kong/db/schema/init.lua index 54a1883ac20..ea6c673e8ba 100644 --- a/kong/db/schema/init.lua +++ b/kong/db/schema/init.lua @@ -1680,6 +1680,10 @@ function Schema:process_auto_fields(data, context, nulls, opts) end end end + + if is_select and sdata.include_in_output and opts.expand_shorthands then + data[sname] = sdata.translate_backwards(data) + end end if has_errs then return nil, 
errs @@ -1908,7 +1912,20 @@ function Schema:process_auto_fields(data, context, nulls, opts) elseif not ((key == "ttl" and self.ttl) or (key == "ws_id" and show_ws)) then - data[key] = nil + + local should_be_in_ouput = false + + if self.shorthand_fields then + for _, shorthand_field in ipairs(self.shorthand_fields) do + if shorthand_field[key] and shorthand_field[key].include_in_output then + should_be_in_ouput = is_select + end + end + end + + if not should_be_in_ouput then + data[key] = nil + end end end diff --git a/kong/db/schema/metaschema.lua b/kong/db/schema/metaschema.lua index cb2c9eafba4..36bb8747ed2 100644 --- a/kong/db/schema/metaschema.lua +++ b/kong/db/schema/metaschema.lua @@ -683,6 +683,8 @@ local function make_shorthand_field_schema() shorthand_field_schema[1] = { type = { type = "string", one_of = shorthand_field_types, required = true }, } insert(shorthand_field_schema, { func = { type = "function", required = true } }) + insert(shorthand_field_schema, { translate_backwards = { type = "function", required = false } }) + insert(shorthand_field_schema, { include_in_output = { type = "boolean", required = false, default = false } }) return shorthand_field_schema end diff --git a/kong/plugins/acme/schema.lua b/kong/plugins/acme/schema.lua index 37a4bb99efd..2cbf4dd5940 100644 --- a/kong/plugins/acme/schema.lua +++ b/kong/plugins/acme/schema.lua @@ -42,6 +42,10 @@ local LEGACY_SCHEMA_TRANSLATIONS = { { auth = { type = "string", len_min = 0, + include_in_output = true, + translate_backwards = function(instance) + return instance.password + end, func = function(value) deprecation("acme: config.storage_config.redis.auth is deprecated, please use config.storage_config.redis.password instead", { after = "4.0", }) @@ -50,6 +54,10 @@ local LEGACY_SCHEMA_TRANSLATIONS = { }}, { ssl_server_name = { type = "string", + include_in_output = true, + translate_backwards = function(instance) + return instance.server_name + end, func = function(value) deprecation("acme: config.storage_config.redis.ssl_server_name is deprecated, please use config.storage_config.redis.server_name instead", { after = "4.0", }) @@ -59,6 +67,10 @@ local LEGACY_SCHEMA_TRANSLATIONS = { { namespace = { type = "string", len_min = 0, + include_in_output = true, + translate_backwards = function(instance) + return instance.extra_options.namespace + end, func = function(value) deprecation("acme: config.storage_config.redis.namespace is deprecated, please use config.storage_config.redis.extra_options.namespace instead", { after = "4.0", }) @@ -67,6 +79,10 @@ local LEGACY_SCHEMA_TRANSLATIONS = { }}, { scan_count = { type = "integer", + include_in_output = true, + translate_backwards = function(instance) + return instance.extra_options.scan_count + end, func = function(value) deprecation("acme: config.storage_config.redis.scan_count is deprecated, please use config.storage_config.redis.extra_options.scan_count instead", { after = "4.0", }) diff --git a/kong/plugins/rate-limiting/schema.lua b/kong/plugins/rate-limiting/schema.lua index d871017ef98..898d44e416b 100644 --- a/kong/plugins/rate-limiting/schema.lua +++ b/kong/plugins/rate-limiting/schema.lua @@ -103,6 +103,10 @@ return { -- TODO: deprecated forms, to be removed in Kong 4.0 { redis_host = { type = "string", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.host + end, func = function(value) deprecation("rate-limiting: config.redis_host is deprecated, please use config.redis.host instead", { after = "4.0", }) @@ -111,6 
+115,10 @@ return { } }, { redis_port = { type = "integer", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.port + end, func = function(value) deprecation("rate-limiting: config.redis_port is deprecated, please use config.redis.port instead", { after = "4.0", }) @@ -120,6 +128,10 @@ return { { redis_password = { type = "string", len_min = 0, + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.password + end, func = function(value) deprecation("rate-limiting: config.redis_password is deprecated, please use config.redis.password instead", { after = "4.0", }) @@ -128,6 +140,10 @@ return { } }, { redis_username = { type = "string", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.username + end, func = function(value) deprecation("rate-limiting: config.redis_username is deprecated, please use config.redis.username instead", { after = "4.0", }) @@ -136,6 +152,10 @@ return { } }, { redis_ssl = { type = "boolean", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.ssl + end, func = function(value) deprecation("rate-limiting: config.redis_ssl is deprecated, please use config.redis.ssl instead", { after = "4.0", }) @@ -144,6 +164,10 @@ return { } }, { redis_ssl_verify = { type = "boolean", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.ssl_verify + end, func = function(value) deprecation("rate-limiting: config.redis_ssl_verify is deprecated, please use config.redis.ssl_verify instead", { after = "4.0", }) @@ -152,6 +176,10 @@ return { } }, { redis_server_name = { type = "string", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.server_name + end, func = function(value) deprecation("rate-limiting: config.redis_server_name is deprecated, please use config.redis.server_name instead", { after = "4.0", }) @@ -160,6 +188,10 @@ return { } }, { redis_timeout = { type = "integer", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.timeout + end, func = function(value) deprecation("rate-limiting: config.redis_timeout is deprecated, please use config.redis.timeout instead", { after = "4.0", }) @@ -168,6 +200,10 @@ return { } }, { redis_database = { type = "integer", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.database + end, func = function(value) deprecation("rate-limiting: config.redis_database is deprecated, please use config.redis.database instead", { after = "4.0", }) diff --git a/kong/plugins/response-ratelimiting/schema.lua b/kong/plugins/response-ratelimiting/schema.lua index a6e40163b6c..0c45f0e51c5 100644 --- a/kong/plugins/response-ratelimiting/schema.lua +++ b/kong/plugins/response-ratelimiting/schema.lua @@ -142,6 +142,10 @@ return { -- TODO: deprecated forms, to be removed in Kong 4.0 { redis_host = { type = "string", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.host + end, func = function(value) deprecation("response-ratelimiting: config.redis_host is deprecated, please use config.redis.host instead", { after = "4.0", }) @@ -150,6 +154,10 @@ return { } }, { redis_port = { type = "integer", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.port + end, func = function(value) deprecation("response-ratelimiting: config.redis_port is 
deprecated, please use config.redis.port instead", { after = "4.0", }) @@ -159,6 +167,10 @@ return { { redis_password = { type = "string", len_min = 0, + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.password + end, func = function(value) deprecation("response-ratelimiting: config.redis_password is deprecated, please use config.redis.password instead", { after = "4.0", }) @@ -167,6 +179,10 @@ return { } }, { redis_username = { type = "string", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.username + end, func = function(value) deprecation("response-ratelimiting: config.redis_username is deprecated, please use config.redis.username instead", { after = "4.0", }) @@ -175,6 +191,10 @@ return { } }, { redis_ssl = { type = "boolean", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.ssl + end, func = function(value) deprecation("response-ratelimiting: config.redis_ssl is deprecated, please use config.redis.ssl instead", { after = "4.0", }) @@ -183,6 +203,10 @@ return { } }, { redis_ssl_verify = { type = "boolean", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.ssl_verify + end, func = function(value) deprecation("response-ratelimiting: config.redis_ssl_verify is deprecated, please use config.redis.ssl_verify instead", { after = "4.0", }) @@ -191,6 +215,10 @@ return { } }, { redis_server_name = { type = "string", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.server_name + end, func = function(value) deprecation("response-ratelimiting: config.redis_server_name is deprecated, please use config.redis.server_name instead", { after = "4.0", }) @@ -199,6 +227,10 @@ return { } }, { redis_timeout = { type = "integer", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.timeout + end, func = function(value) deprecation("response-ratelimiting: config.redis_timeout is deprecated, please use config.redis.timeout instead", { after = "4.0", }) @@ -207,6 +239,10 @@ return { } }, { redis_database = { type = "integer", + include_in_output = true, + translate_backwards = function(instance) + return instance.redis.database + end, func = function(value) deprecation("response-ratelimiting: config.redis_database is deprecated, please use config.redis.database instead", { after = "4.0", }) diff --git a/spec/02-integration/09-hybrid_mode/09-config-compat_spec.lua b/spec/02-integration/09-hybrid_mode/09-config-compat_spec.lua index 60b07225bd2..f1180b6884a 100644 --- a/spec/02-integration/09-hybrid_mode/09-config-compat_spec.lua +++ b/spec/02-integration/09-hybrid_mode/09-config-compat_spec.lua @@ -120,7 +120,10 @@ describe("CP/DP config compat transformations #" .. strategy, function() enabled = true, config = { second = 1, - policy = "local", + policy = "redis", + redis = { + host = "localhost" + }, -- [[ new fields error_code = 403, @@ -134,6 +137,7 @@ describe("CP/DP config compat transformations #" .. strategy, function() should not have: error_code, error_message, sync_rate --]] local expected = utils.cycle_aware_deep_copy(rate_limit) + expected.config.redis = nil expected.config.error_code = nil expected.config.error_message = nil expected.config.sync_rate = nil @@ -146,6 +150,7 @@ describe("CP/DP config compat transformations #" .. 
strategy, function() should not have: sync_rate --]] expected = utils.cycle_aware_deep_copy(rate_limit) + expected.config.redis = nil expected.config.sync_rate = nil do_assert(utils.uuid(), "3.2.0", expected) @@ -156,6 +161,7 @@ describe("CP/DP config compat transformations #" .. strategy, function() should not have: sync_rate --]] expected = utils.cycle_aware_deep_copy(rate_limit) + expected.config.redis = nil expected.config.sync_rate = nil do_assert(utils.uuid(), "3.3.0", expected) @@ -169,7 +175,10 @@ describe("CP/DP config compat transformations #" .. strategy, function() enabled = true, config = { second = 1, - policy = "local", + policy = "redis", + redis = { + host = "localhost" + }, -- [[ new fields error_code = 403, @@ -179,7 +188,9 @@ describe("CP/DP config compat transformations #" .. strategy, function() }, } - do_assert(utils.uuid(), "3.4.0", rate_limit) + local expected = utils.cycle_aware_deep_copy(rate_limit) + expected.config.redis = nil + do_assert(utils.uuid(), "3.4.0", expected) -- cleanup admin.plugins:remove({ id = rate_limit.id }) diff --git a/spec/03-plugins/23-rate-limiting/05-integration_spec.lua b/spec/03-plugins/23-rate-limiting/05-integration_spec.lua index 0c86093f27d..207cbb09918 100644 --- a/spec/03-plugins/23-rate-limiting/05-integration_spec.lua +++ b/spec/03-plugins/23-rate-limiting/05-integration_spec.lua @@ -497,6 +497,17 @@ describe("Plugin: rate-limiting (integration)", function() assert.same(plugin_config.redis_ssl_verify, json.config.redis.ssl_verify) assert.same(plugin_config.redis_server_name, json.config.redis.server_name) + -- verify that legacy fields are present for backwards compatibility + assert.same(plugin_config.redis_host, json.config.redis_host) + assert.same(plugin_config.redis_port, json.config.redis_port) + assert.same(plugin_config.redis_username, json.config.redis_username) + assert.same(plugin_config.redis_password, json.config.redis_password) + assert.same(plugin_config.redis_database, json.config.redis_database) + assert.same(plugin_config.redis_timeout, json.config.redis_timeout) + assert.same(plugin_config.redis_ssl, json.config.redis_ssl) + assert.same(plugin_config.redis_ssl_verify, json.config.redis_ssl_verify) + assert.same(plugin_config.redis_server_name, json.config.redis_server_name) + delete_plugin(admin_client, json) assert.logfile().has.line("rate-limiting: config.redis_host is deprecated, please use config.redis.host instead (deprecated after 4.0)", true) diff --git a/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua b/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua index aae19ecee50..bd0544d33e4 100644 --- a/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua +++ b/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua @@ -510,6 +510,17 @@ describe("Plugin: rate-limiting (integration)", function() assert.same(plugin_config.redis_ssl_verify, json.config.redis.ssl_verify) assert.same(plugin_config.redis_server_name, json.config.redis.server_name) + -- verify that legacy fields are present for backwards compatibility + assert.same(plugin_config.redis_host, json.config.redis_host) + assert.same(plugin_config.redis_port, json.config.redis_port) + assert.same(plugin_config.redis_username, json.config.redis_username) + assert.same(plugin_config.redis_password, json.config.redis_password) + assert.same(plugin_config.redis_database, json.config.redis_database) + assert.same(plugin_config.redis_timeout, json.config.redis_timeout) + 
assert.same(plugin_config.redis_ssl, json.config.redis_ssl) + assert.same(plugin_config.redis_ssl_verify, json.config.redis_ssl_verify) + assert.same(plugin_config.redis_server_name, json.config.redis_server_name) + delete_plugin(admin_client, json) assert.logfile().has.line("response-ratelimiting: config.redis_host is deprecated, please use config.redis.host instead (deprecated after 4.0)", true) diff --git a/spec/03-plugins/29-acme/05-redis_storage_spec.lua b/spec/03-plugins/29-acme/05-redis_storage_spec.lua index 8bcbc8e4b26..3298dcbaf01 100644 --- a/spec/03-plugins/29-acme/05-redis_storage_spec.lua +++ b/spec/03-plugins/29-acme/05-redis_storage_spec.lua @@ -380,6 +380,12 @@ describe("Plugin: acme (storage.redis)", function() assert.same(redis_config.scan_count, json.config.storage_config.redis.extra_options.scan_count) assert.same(redis_config.namespace, json.config.storage_config.redis.extra_options.namespace) + -- verify that legacy fields are present for backwards compatibility + assert.same(redis_config.auth, json.config.storage_config.redis.auth) + assert.same(redis_config.ssl_server_name, json.config.storage_config.redis.ssl_server_name) + assert.same(redis_config.scan_count, json.config.storage_config.redis.scan_count) + assert.same(redis_config.namespace, json.config.storage_config.redis.namespace) + delete_plugin(client, json) assert.logfile().has.line("acme: config.storage_config.redis.namespace is deprecated, " .. From 4e515833e63896cb5ded292dde884974d5bf4574 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Tue, 6 Feb 2024 15:06:06 +0100 Subject: [PATCH 26/91] chore(dao): refactor translate_backwards This commits simplifies translate_backwards feature by switching from function that retrieves value approach to a table with a path to the necessary key. It also adds tests for other paths (GET, POST, PUT, PATCH) for querying admin api. 
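The path-based form makes the backwards translation purely declarative: a deprecated flat field names the nested location of its value as a list of keys, and a generic table walk resolves that path when an entity is read back. The standalone Lua sketch below illustrates the idea; `table_path` mirrors the helper added to `kong/tools/table.lua` and the `translate_backwards` declarations follow the rate-limiting schema in this patch, while the `expand_shorthands` wrapper is only a simplified stand-in for what `Schema:process_auto_fields` does on "select" when `opts.expand_shorthands` is set.

    -- Standalone sketch of the path-based translate_backwards lookup.
    local function table_path(t, path)
      -- walk the table following the keys in `path`; nil if any step is missing
      local current = t
      for _, key in ipairs(path) do
        if current[key] == nil then
          return nil
        end
        current = current[key]
      end
      return current
    end

    -- deprecated flat fields pointing at their new nested locations
    local shorthand_fields = {
      { redis_host = { type = "string",  translate_backwards = { "redis", "host" } } },
      { redis_port = { type = "integer", translate_backwards = { "redis", "port" } } },
    }

    -- what a stored plugin config looks like after the shorthands were migrated
    local config = { redis = { host = "custom-host.example.test", port = 55000 } }

    -- on read, expose the legacy names again so older clients keep working
    local function expand_shorthands(cfg)
      for _, field in ipairs(shorthand_fields) do
        for name, spec in pairs(field) do
          if spec.translate_backwards then
            cfg[name] = table_path(cfg, spec.translate_backwards)
          end
        end
      end
      return cfg
    end

    local out = expand_shorthands(config)
    print(out.redis_host, out.redis_port)  --> custom-host.example.test  55000

In the real DAO this is what lets the Admin API keep returning legacy keys such as `config.redis_host` even though only the nested `config.redis.host` is stored, as the new shorthand_fields specs added below verify.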
KAG-3677 --- kong/db/schema/init.lua | 6 +- kong/db/schema/metaschema.lua | 3 +- kong/plugins/acme/schema.lua | 20 +- kong/plugins/rate-limiting/schema.lua | 45 +--- kong/plugins/response-ratelimiting/schema.lua | 45 +--- kong/tools/table.lua | 17 ++ spec/01-unit/05-utils_spec.lua | 36 +++ .../06-shorthand_fields_spec.lua | 225 +++++++++++++++++ .../06-shorthand_fields_spec.lua | 233 ++++++++++++++++++ .../29-acme/07-shorthand_fields_spec.lua | 156 ++++++++++++ 10 files changed, 693 insertions(+), 93 deletions(-) create mode 100644 spec/03-plugins/23-rate-limiting/06-shorthand_fields_spec.lua create mode 100644 spec/03-plugins/24-response-rate-limiting/06-shorthand_fields_spec.lua create mode 100644 spec/03-plugins/29-acme/07-shorthand_fields_spec.lua diff --git a/kong/db/schema/init.lua b/kong/db/schema/init.lua index ea6c673e8ba..2d241ebb200 100644 --- a/kong/db/schema/init.lua +++ b/kong/db/schema/init.lua @@ -1681,8 +1681,8 @@ function Schema:process_auto_fields(data, context, nulls, opts) end end - if is_select and sdata.include_in_output and opts.expand_shorthands then - data[sname] = sdata.translate_backwards(data) + if is_select and sdata.translate_backwards and opts.expand_shorthands then + data[sname] = utils.table_path(data, sdata.translate_backwards) end end if has_errs then @@ -1917,7 +1917,7 @@ function Schema:process_auto_fields(data, context, nulls, opts) if self.shorthand_fields then for _, shorthand_field in ipairs(self.shorthand_fields) do - if shorthand_field[key] and shorthand_field[key].include_in_output then + if shorthand_field[key] and shorthand_field[key].translate_backwards then should_be_in_ouput = is_select end end diff --git a/kong/db/schema/metaschema.lua b/kong/db/schema/metaschema.lua index 36bb8747ed2..5c35424c402 100644 --- a/kong/db/schema/metaschema.lua +++ b/kong/db/schema/metaschema.lua @@ -683,8 +683,7 @@ local function make_shorthand_field_schema() shorthand_field_schema[1] = { type = { type = "string", one_of = shorthand_field_types, required = true }, } insert(shorthand_field_schema, { func = { type = "function", required = true } }) - insert(shorthand_field_schema, { translate_backwards = { type = "function", required = false } }) - insert(shorthand_field_schema, { include_in_output = { type = "boolean", required = false, default = false } }) + insert(shorthand_field_schema, { translate_backwards = { type = "array", elements = { type = "string" }, required = false } }) return shorthand_field_schema end diff --git a/kong/plugins/acme/schema.lua b/kong/plugins/acme/schema.lua index 2cbf4dd5940..1c4d03be53d 100644 --- a/kong/plugins/acme/schema.lua +++ b/kong/plugins/acme/schema.lua @@ -42,10 +42,7 @@ local LEGACY_SCHEMA_TRANSLATIONS = { { auth = { type = "string", len_min = 0, - include_in_output = true, - translate_backwards = function(instance) - return instance.password - end, + translate_backwards = {'password'}, func = function(value) deprecation("acme: config.storage_config.redis.auth is deprecated, please use config.storage_config.redis.password instead", { after = "4.0", }) @@ -54,10 +51,7 @@ local LEGACY_SCHEMA_TRANSLATIONS = { }}, { ssl_server_name = { type = "string", - include_in_output = true, - translate_backwards = function(instance) - return instance.server_name - end, + translate_backwards = {'server_name'}, func = function(value) deprecation("acme: config.storage_config.redis.ssl_server_name is deprecated, please use config.storage_config.redis.server_name instead", { after = "4.0", }) @@ -67,10 +61,7 @@ local 
LEGACY_SCHEMA_TRANSLATIONS = { { namespace = { type = "string", len_min = 0, - include_in_output = true, - translate_backwards = function(instance) - return instance.extra_options.namespace - end, + translate_backwards = {'extra_options', 'namespace'}, func = function(value) deprecation("acme: config.storage_config.redis.namespace is deprecated, please use config.storage_config.redis.extra_options.namespace instead", { after = "4.0", }) @@ -79,10 +70,7 @@ local LEGACY_SCHEMA_TRANSLATIONS = { }}, { scan_count = { type = "integer", - include_in_output = true, - translate_backwards = function(instance) - return instance.extra_options.scan_count - end, + translate_backwards = {'extra_options', 'scan_count'}, func = function(value) deprecation("acme: config.storage_config.redis.scan_count is deprecated, please use config.storage_config.redis.extra_options.scan_count instead", { after = "4.0", }) diff --git a/kong/plugins/rate-limiting/schema.lua b/kong/plugins/rate-limiting/schema.lua index 898d44e416b..21d48bfe29b 100644 --- a/kong/plugins/rate-limiting/schema.lua +++ b/kong/plugins/rate-limiting/schema.lua @@ -103,10 +103,7 @@ return { -- TODO: deprecated forms, to be removed in Kong 4.0 { redis_host = { type = "string", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.host - end, + translate_backwards = {'redis', 'host'}, func = function(value) deprecation("rate-limiting: config.redis_host is deprecated, please use config.redis.host instead", { after = "4.0", }) @@ -115,10 +112,7 @@ return { } }, { redis_port = { type = "integer", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.port - end, + translate_backwards = {'redis', 'port'}, func = function(value) deprecation("rate-limiting: config.redis_port is deprecated, please use config.redis.port instead", { after = "4.0", }) @@ -128,10 +122,7 @@ return { { redis_password = { type = "string", len_min = 0, - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.password - end, + translate_backwards = {'redis', 'password'}, func = function(value) deprecation("rate-limiting: config.redis_password is deprecated, please use config.redis.password instead", { after = "4.0", }) @@ -140,10 +131,7 @@ return { } }, { redis_username = { type = "string", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.username - end, + translate_backwards = {'redis', 'username'}, func = function(value) deprecation("rate-limiting: config.redis_username is deprecated, please use config.redis.username instead", { after = "4.0", }) @@ -152,10 +140,7 @@ return { } }, { redis_ssl = { type = "boolean", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.ssl - end, + translate_backwards = {'redis', 'ssl'}, func = function(value) deprecation("rate-limiting: config.redis_ssl is deprecated, please use config.redis.ssl instead", { after = "4.0", }) @@ -164,10 +149,7 @@ return { } }, { redis_ssl_verify = { type = "boolean", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.ssl_verify - end, + translate_backwards = {'redis', 'ssl_verify'}, func = function(value) deprecation("rate-limiting: config.redis_ssl_verify is deprecated, please use config.redis.ssl_verify instead", { after = "4.0", }) @@ -176,10 +158,7 @@ return { } }, { redis_server_name = { type = "string", - include_in_output = true, - translate_backwards = 
function(instance) - return instance.redis.server_name - end, + translate_backwards = {'redis', 'server_name'}, func = function(value) deprecation("rate-limiting: config.redis_server_name is deprecated, please use config.redis.server_name instead", { after = "4.0", }) @@ -188,10 +167,7 @@ return { } }, { redis_timeout = { type = "integer", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.timeout - end, + translate_backwards = {'redis', 'timeout'}, func = function(value) deprecation("rate-limiting: config.redis_timeout is deprecated, please use config.redis.timeout instead", { after = "4.0", }) @@ -200,10 +176,7 @@ return { } }, { redis_database = { type = "integer", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.database - end, + translate_backwards = {'redis', 'database'}, func = function(value) deprecation("rate-limiting: config.redis_database is deprecated, please use config.redis.database instead", { after = "4.0", }) diff --git a/kong/plugins/response-ratelimiting/schema.lua b/kong/plugins/response-ratelimiting/schema.lua index 0c45f0e51c5..4c6f765343b 100644 --- a/kong/plugins/response-ratelimiting/schema.lua +++ b/kong/plugins/response-ratelimiting/schema.lua @@ -142,10 +142,7 @@ return { -- TODO: deprecated forms, to be removed in Kong 4.0 { redis_host = { type = "string", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.host - end, + translate_backwards = {'redis', 'host'}, func = function(value) deprecation("response-ratelimiting: config.redis_host is deprecated, please use config.redis.host instead", { after = "4.0", }) @@ -154,10 +151,7 @@ return { } }, { redis_port = { type = "integer", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.port - end, + translate_backwards = {'redis', 'port'}, func = function(value) deprecation("response-ratelimiting: config.redis_port is deprecated, please use config.redis.port instead", { after = "4.0", }) @@ -167,10 +161,7 @@ return { { redis_password = { type = "string", len_min = 0, - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.password - end, + translate_backwards = {'redis', 'password'}, func = function(value) deprecation("response-ratelimiting: config.redis_password is deprecated, please use config.redis.password instead", { after = "4.0", }) @@ -179,10 +170,7 @@ return { } }, { redis_username = { type = "string", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.username - end, + translate_backwards = {'redis', 'username'}, func = function(value) deprecation("response-ratelimiting: config.redis_username is deprecated, please use config.redis.username instead", { after = "4.0", }) @@ -191,10 +179,7 @@ return { } }, { redis_ssl = { type = "boolean", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.ssl - end, + translate_backwards = {'redis', 'ssl'}, func = function(value) deprecation("response-ratelimiting: config.redis_ssl is deprecated, please use config.redis.ssl instead", { after = "4.0", }) @@ -203,10 +188,7 @@ return { } }, { redis_ssl_verify = { type = "boolean", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.ssl_verify - end, + translate_backwards = {'redis', 'ssl_verify'}, func = function(value) deprecation("response-ratelimiting: config.redis_ssl_verify is 
deprecated, please use config.redis.ssl_verify instead", { after = "4.0", }) @@ -215,10 +197,7 @@ return { } }, { redis_server_name = { type = "string", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.server_name - end, + translate_backwards = {'redis', 'server_name'}, func = function(value) deprecation("response-ratelimiting: config.redis_server_name is deprecated, please use config.redis.server_name instead", { after = "4.0", }) @@ -227,10 +206,7 @@ return { } }, { redis_timeout = { type = "integer", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.timeout - end, + translate_backwards = {'redis', 'timeout'}, func = function(value) deprecation("response-ratelimiting: config.redis_timeout is deprecated, please use config.redis.timeout instead", { after = "4.0", }) @@ -239,10 +215,7 @@ return { } }, { redis_database = { type = "integer", - include_in_output = true, - translate_backwards = function(instance) - return instance.redis.database - end, + translate_backwards = {'redis', 'database'}, func = function(value) deprecation("response-ratelimiting: config.redis_database is deprecated, please use config.redis.database instead", { after = "4.0", }) diff --git a/kong/tools/table.lua b/kong/tools/table.lua index f5fea379c70..19d6265048f 100644 --- a/kong/tools/table.lua +++ b/kong/tools/table.lua @@ -307,5 +307,22 @@ function _M.add_error(errors, k, v) return errors end +--- Retrieves a value from table using path. +-- @param t The source table to retrieve the value from. +-- @param path Path table containing keys +-- @param v Value of the error +-- @return Returns `value` if something was found and `nil` otherwise +function _M.table_path(t, path) + local current_value = t + for _, path_element in ipairs(path) do + if current_value[path_element] == nil then + return nil + end + + current_value = current_value[path_element] + end + + return current_value +end return _M diff --git a/spec/01-unit/05-utils_spec.lua b/spec/01-unit/05-utils_spec.lua index ea0fb9c1188..03082bc6fee 100644 --- a/spec/01-unit/05-utils_spec.lua +++ b/spec/01-unit/05-utils_spec.lua @@ -1648,4 +1648,40 @@ describe("Utils", function() assert.equal(meta, getmetatable(t3.b.a)) end) end) + + describe("table_path(t, path)", function() + local t = { + x = 1, + a = { + b = { + c = 200 + }, + }, + z = 2 + } + + it("retrieves value from table based on path - single level", function() + local path = { "x" } + + assert.equal(1, utils.table_path(t, path)) + end) + + it("retrieves value from table based on path - deep value", function() + local path = { "a", "b", "c" } + + assert.equal(200, utils.table_path(t, path)) + end) + + it("returns nil if element is not found - leaf not found", function() + local path = { "a", "b", "x" } + + assert.equal(nil, utils.table_path(t, path)) + end) + + it("returns nil if element is not found - root branch not found", function() + local path = { "o", "j", "k" } + + assert.equal(nil, utils.table_path(t, path)) + end) + end) end) diff --git a/spec/03-plugins/23-rate-limiting/06-shorthand_fields_spec.lua b/spec/03-plugins/23-rate-limiting/06-shorthand_fields_spec.lua new file mode 100644 index 00000000000..b279e62eeaf --- /dev/null +++ b/spec/03-plugins/23-rate-limiting/06-shorthand_fields_spec.lua @@ -0,0 +1,225 @@ +local helpers = require "spec.helpers" +local utils = require "kong.tools.utils" +local cjson = require "cjson" + + +describe("Plugin: rate-limiting (shorthand fields)", function() + local bp, 
route, admin_client + local plugin_id = utils.uuid() + + lazy_setup(function() + bp = helpers.get_db_utils(nil, { + "routes", + "services", + "plugins", + }, { + "rate-limiting" + }) + + route = assert(bp.routes:insert { + hosts = { "redis.test" }, + }) + + assert(helpers.start_kong()) + admin_client = helpers.admin_client() + end) + + lazy_teardown(function() + if admin_client then + admin_client:close() + end + + helpers.stop_kong() + end) + + local function assert_redis_config_same(expected_config, received_config) + -- verify that legacy config got written into new structure + assert.same(expected_config.redis_host, received_config.redis.host) + assert.same(expected_config.redis_port, received_config.redis.port) + assert.same(expected_config.redis_username, received_config.redis.username) + assert.same(expected_config.redis_password, received_config.redis.password) + assert.same(expected_config.redis_database, received_config.redis.database) + assert.same(expected_config.redis_timeout, received_config.redis.timeout) + assert.same(expected_config.redis_ssl, received_config.redis.ssl) + assert.same(expected_config.redis_ssl_verify, received_config.redis.ssl_verify) + assert.same(expected_config.redis_server_name, received_config.redis.server_name) + + -- verify that legacy fields are present for backwards compatibility + assert.same(expected_config.redis_host, received_config.redis_host) + assert.same(expected_config.redis_port, received_config.redis_port) + assert.same(expected_config.redis_username, received_config.redis_username) + assert.same(expected_config.redis_password, received_config.redis_password) + assert.same(expected_config.redis_database, received_config.redis_database) + assert.same(expected_config.redis_timeout, received_config.redis_timeout) + assert.same(expected_config.redis_ssl, received_config.redis_ssl) + assert.same(expected_config.redis_ssl_verify, received_config.redis_ssl_verify) + assert.same(expected_config.redis_server_name, received_config.redis_server_name) + end + + describe("single plugin tests", function() + local plugin_config = { + minute = 100, + policy = "redis", + redis_host = "custom-host.example.test", + redis_port = 55000, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example.test", + } + + after_each(function () + local res = assert(admin_client:send({ + method = "DELETE", + path = "/plugins/" .. plugin_id, + })) + + assert.res_status(204, res) + end) + + it("POST/PATCH/GET request returns legacy fields", function() + -- POST + local res = assert(admin_client:send { + method = "POST", + route = { + id = route.id + }, + path = "/plugins", + headers = { ["Content-Type"] = "application/json" }, + body = { + id = plugin_id, + name = "rate-limiting", + config = plugin_config, + }, + }) + + local json = cjson.decode(assert.res_status(201, res)) + assert_redis_config_same(plugin_config, json.config) + + -- PATCH + local updated_host = 'testhost' + res = assert(admin_client:send { + method = "PATCH", + path = "/plugins/" .. 
plugin_id, + headers = { ["Content-Type"] = "application/json" }, + body = { + name = "rate-limiting", + config = { + redis_host = updated_host + }, + }, + }) + + json = cjson.decode(assert.res_status(200, res)) + local patched_config = utils.cycle_aware_deep_copy(plugin_config) + patched_config.redis_host = updated_host + assert_redis_config_same(patched_config, json.config) + + -- GET + res = assert(admin_client:send { + method = "GET", + path = "/plugins/" .. plugin_id + }) + + json = cjson.decode(assert.res_status(200, res)) + assert_redis_config_same(patched_config, json.config) + end) + + it("successful PUT request returns legacy fields", function() + local res = assert(admin_client:send { + method = "PUT", + route = { + id = route.id + }, + path = "/plugins/" .. plugin_id, + headers = { ["Content-Type"] = "application/json" }, + body = { + name = "rate-limiting", + config = plugin_config, + }, + }) + + local json = cjson.decode(assert.res_status(200, res)) + assert_redis_config_same(plugin_config, json.config) + end) + end) + + describe('mutliple instances', function() + local redis1_port = 55000 + lazy_setup(function() + local routes_count = 100 + for i=1,routes_count do + local route = assert(bp.routes:insert { + hosts = { "redis" .. tostring(i) .. ".test" }, + }) + assert(bp.plugins:insert { + name = "rate-limiting", + route = { id = route.id }, + config = { + minute = 100 + i, + policy = "redis", + redis_host = "custom-host" .. tostring(i) .. ".example.test", + redis_port = redis1_port + i, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example" .. tostring(i) .. ".test", + }, + }) + end + end) + + it('get collection', function () + local res = assert(admin_client:send { + path = "/plugins" + }) + + local json = cjson.decode(assert.res_status(200, res)) + for _,plugin in ipairs(json.data) do + local i = plugin.config.redis.port - redis1_port + local expected_config = { + redis_host = "custom-host" .. tostring(i) .. ".example.test", + redis_port = redis1_port + i, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example" .. tostring(i) .. ".test", + } + assert_redis_config_same(expected_config, plugin.config) + end + end) + + it('get paginated collection', function () + local res = assert(admin_client:send { + path = "/plugins", + query = { size = 50 } + }) + + local json = cjson.decode(assert.res_status(200, res)) + for _,plugin in ipairs(json.data) do + local i = plugin.config.redis.port - redis1_port + local expected_config = { + redis_host = "custom-host" .. tostring(i) .. ".example.test", + redis_port = redis1_port + i, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example" .. tostring(i) .. 
".test", + } + assert_redis_config_same(expected_config, plugin.config) + end + end) + end) +end) diff --git a/spec/03-plugins/24-response-rate-limiting/06-shorthand_fields_spec.lua b/spec/03-plugins/24-response-rate-limiting/06-shorthand_fields_spec.lua new file mode 100644 index 00000000000..f506d85ea64 --- /dev/null +++ b/spec/03-plugins/24-response-rate-limiting/06-shorthand_fields_spec.lua @@ -0,0 +1,233 @@ +local helpers = require "spec.helpers" +local utils = require "kong.tools.utils" +local cjson = require "cjson" + + +describe("Plugin: response-ratelimiting (shorthand fields)", function() + local bp, route, admin_client + local plugin_id = utils.uuid() + + lazy_setup(function() + bp = helpers.get_db_utils(nil, { + "routes", + "services", + "plugins", + }, { + "response-ratelimiting" + }) + + route = assert(bp.routes:insert { + hosts = { "redis.test" }, + }) + + assert(helpers.start_kong()) + admin_client = helpers.admin_client() + end) + + lazy_teardown(function() + if admin_client then + admin_client:close() + end + + helpers.stop_kong() + end) + + local function assert_redis_config_same(expected_config, received_config) + -- verify that legacy config got written into new structure + assert.same(expected_config.redis_host, received_config.redis.host) + assert.same(expected_config.redis_port, received_config.redis.port) + assert.same(expected_config.redis_username, received_config.redis.username) + assert.same(expected_config.redis_password, received_config.redis.password) + assert.same(expected_config.redis_database, received_config.redis.database) + assert.same(expected_config.redis_timeout, received_config.redis.timeout) + assert.same(expected_config.redis_ssl, received_config.redis.ssl) + assert.same(expected_config.redis_ssl_verify, received_config.redis.ssl_verify) + assert.same(expected_config.redis_server_name, received_config.redis.server_name) + + -- verify that legacy fields are present for backwards compatibility + assert.same(expected_config.redis_host, received_config.redis_host) + assert.same(expected_config.redis_port, received_config.redis_port) + assert.same(expected_config.redis_username, received_config.redis_username) + assert.same(expected_config.redis_password, received_config.redis_password) + assert.same(expected_config.redis_database, received_config.redis_database) + assert.same(expected_config.redis_timeout, received_config.redis_timeout) + assert.same(expected_config.redis_ssl, received_config.redis_ssl) + assert.same(expected_config.redis_ssl_verify, received_config.redis_ssl_verify) + assert.same(expected_config.redis_server_name, received_config.redis_server_name) + end + + describe("single plugin tests", function() + local plugin_config = { + limits = { + video = { + minute = 100, + } + }, + policy = "redis", + redis_host = "custom-host.example.test", + redis_port = 55000, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example.test", + } + + after_each(function () + local res = assert(admin_client:send({ + method = "DELETE", + path = "/plugins/" .. 
plugin_id, + })) + + assert.res_status(204, res) + end) + + it("POST/PATCH/GET request returns legacy fields", function() + -- POST + local res = assert(admin_client:send { + method = "POST", + route = { + id = route.id + }, + path = "/plugins", + headers = { ["Content-Type"] = "application/json" }, + body = { + id = plugin_id, + name = "response-ratelimiting", + config = plugin_config, + }, + }) + + local json = cjson.decode(assert.res_status(201, res)) + assert_redis_config_same(plugin_config, json.config) + + -- PATCH + local updated_host = 'testhost' + res = assert(admin_client:send { + method = "PATCH", + path = "/plugins/" .. plugin_id, + headers = { ["Content-Type"] = "application/json" }, + body = { + name = "response-ratelimiting", + config = { + redis_host = updated_host + }, + }, + }) + + json = cjson.decode(assert.res_status(200, res)) + local patched_config = utils.cycle_aware_deep_copy(plugin_config) + patched_config.redis_host = updated_host + assert_redis_config_same(patched_config, json.config) + + -- GET + res = assert(admin_client:send { + method = "GET", + path = "/plugins/" .. plugin_id + }) + + json = cjson.decode(assert.res_status(200, res)) + assert_redis_config_same(patched_config, json.config) + end) + + it("successful PUT request returns legacy fields", function() + local res = assert(admin_client:send { + method = "PUT", + route = { + id = route.id + }, + path = "/plugins/" .. plugin_id, + headers = { ["Content-Type"] = "application/json" }, + body = { + name = "response-ratelimiting", + config = plugin_config, + }, + }) + + local json = cjson.decode(assert.res_status(200, res)) + assert_redis_config_same(plugin_config, json.config) + end) + end) + + describe('mutliple instances', function() + local redis1_port = 55000 + lazy_setup(function() + local routes_count = 100 + for i=1,routes_count do + local route = assert(bp.routes:insert { + hosts = { "redis" .. tostring(i) .. ".test" }, + }) + assert(bp.plugins:insert { + name = "response-ratelimiting", + route = { id = route.id }, + config = { + limits = { + video = { + minute = 100 + i, + } + }, + policy = "redis", + redis_host = "custom-host" .. tostring(i) .. ".example.test", + redis_port = redis1_port + i, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example" .. tostring(i) .. ".test", + }, + }) + end + end) + + it('get collection', function () + local res = assert(admin_client:send { + path = "/plugins" + }) + + local json = cjson.decode(assert.res_status(200, res)) + for _,plugin in ipairs(json.data) do + local i = plugin.config.redis.port - redis1_port + local expected_config = { + redis_host = "custom-host" .. tostring(i) .. ".example.test", + redis_port = redis1_port + i, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example" .. tostring(i) .. ".test", + } + assert_redis_config_same(expected_config, plugin.config) + end + end) + + it('get paginated collection', function () + local res = assert(admin_client:send { + path = "/plugins", + query = { size = 50 } + }) + + local json = cjson.decode(assert.res_status(200, res)) + for _,plugin in ipairs(json.data) do + local i = plugin.config.redis.port - redis1_port + local expected_config = { + redis_host = "custom-host" .. tostring(i) .. 
".example.test", + redis_port = redis1_port + i, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example" .. tostring(i) .. ".test", + } + assert_redis_config_same(expected_config, plugin.config) + end + end) + end) +end) diff --git a/spec/03-plugins/29-acme/07-shorthand_fields_spec.lua b/spec/03-plugins/29-acme/07-shorthand_fields_spec.lua new file mode 100644 index 00000000000..69ea2147e56 --- /dev/null +++ b/spec/03-plugins/29-acme/07-shorthand_fields_spec.lua @@ -0,0 +1,156 @@ +local helpers = require "spec.helpers" +local utils = require "kong.tools.utils" +local cjson = require "cjson" + + +describe("Plugin: acme (shorthand fields)", function() + local bp, route, admin_client + local plugin_id = utils.uuid() + + lazy_setup(function() + bp = helpers.get_db_utils(nil, { + "routes", + "services", + "plugins", + }, { + "acme" + }) + + route = assert(bp.routes:insert { + hosts = { "redis.test" }, + }) + + assert(helpers.start_kong()) + admin_client = helpers.admin_client() + end) + + lazy_teardown(function() + if admin_client then + admin_client:close() + end + + helpers.stop_kong() + end) + + local function assert_redis_config_same(expected_config, received_config) + -- verify that legacy config got written into new structure + assert.same(expected_config.host, received_config.storage_config.redis.host) + assert.same(expected_config.port, received_config.storage_config.redis.port) + assert.same(expected_config.auth, received_config.storage_config.redis.password) + assert.same(expected_config.database, received_config.storage_config.redis.database) + assert.same(expected_config.timeout, received_config.storage_config.redis.timeout) + assert.same(expected_config.ssl, received_config.storage_config.redis.ssl) + assert.same(expected_config.ssl_verify, received_config.storage_config.redis.ssl_verify) + assert.same(expected_config.ssl_server_name, received_config.storage_config.redis.server_name) + assert.same(expected_config.scan_count, received_config.storage_config.redis.extra_options.scan_count) + assert.same(expected_config.namespace, received_config.storage_config.redis.extra_options.namespace) + + -- verify that legacy fields are present for backwards compatibility + assert.same(expected_config.auth, received_config.storage_config.redis.auth) + assert.same(expected_config.ssl_server_name, received_config.storage_config.redis.ssl_server_name) + assert.same(expected_config.scan_count, received_config.storage_config.redis.scan_count) + assert.same(expected_config.namespace, received_config.storage_config.redis.namespace) + end + + describe("single plugin tests", function() + local redis_config = { + host = helpers.redis_host, + port = helpers.redis_port, + auth = "test", + database = 1, + timeout = 3500, + ssl = true, + ssl_verify = true, + ssl_server_name = "example.test", + scan_count = 13, + namespace = "namespace2:", + } + + local plugin_config = { + account_email = "test@test.com", + storage = "redis", + storage_config = { + redis = redis_config, + }, + } + + after_each(function () + local res = assert(admin_client:send({ + method = "DELETE", + path = "/plugins/" .. 
plugin_id, + })) + + assert.res_status(204, res) + end) + + it("POST/PATCH/GET request returns legacy fields", function() + -- POST + local res = assert(admin_client:send { + method = "POST", + route = { + id = route.id + }, + path = "/plugins", + headers = { ["Content-Type"] = "application/json" }, + body = { + id = plugin_id, + name = "acme", + config = plugin_config, + }, + }) + + local json = cjson.decode(assert.res_status(201, res)) + assert_redis_config_same(redis_config, json.config) + + -- PATCH + local updated_host = 'testhost' + res = assert(admin_client:send { + method = "PATCH", + path = "/plugins/" .. plugin_id, + headers = { ["Content-Type"] = "application/json" }, + body = { + name = "acme", + config = { + storage_config = { + redis = { + host = updated_host + } + } + }, + }, + }) + + json = cjson.decode(assert.res_status(200, res)) + local patched_config = utils.cycle_aware_deep_copy(redis_config) + patched_config.host = updated_host + assert_redis_config_same(patched_config, json.config) + + -- GET + res = assert(admin_client:send { + method = "GET", + path = "/plugins/" .. plugin_id + }) + + json = cjson.decode(assert.res_status(200, res)) + assert_redis_config_same(patched_config, json.config) + end) + + it("successful PUT request returns legacy fields", function() + local res = assert(admin_client:send { + method = "PUT", + route = { + id = route.id + }, + path = "/plugins/" .. plugin_id, + headers = { ["Content-Type"] = "application/json" }, + body = { + name = "acme", + config = plugin_config, + }, + }) + + local json = cjson.decode(assert.res_status(200, res)) + assert_redis_config_same(redis_config, json.config) + end) + end) +end) From ade70f6116c8a6db6e1c87f9425450183c328c46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Wed, 7 Feb 2024 10:07:05 +0100 Subject: [PATCH 27/91] fix(dao): add missing shorthand fields expansions This commits adds shorthand expansions for select_by_ ... 
methods KAG-3686 --- kong/db/dao/init.lua | 16 ++++++++++++ kong/db/schema/init.lua | 2 +- .../06-shorthand_fields_spec.lua | 25 ++++++++++++++++++ .../06-shorthand_fields_spec.lua | 26 +++++++++++++++++++ 4 files changed, 68 insertions(+), 1 deletion(-) diff --git a/kong/db/dao/init.lua b/kong/db/dao/init.lua index fdbf928bdab..9456dd51a63 100644 --- a/kong/db/dao/init.lua +++ b/kong/db/dao/init.lua @@ -710,6 +710,10 @@ local function generate_foreign_key_methods(schema) end local entities, err + if options == nil or options.expand_shorthands == nil then + options = options or {} + options.expand_shorthands = true + end entities, err, err_t = self:rows_to_entities(rows, options) if err then return nil, err, err_t @@ -768,6 +772,10 @@ local function generate_foreign_key_methods(schema) end local err + if options == nil or options.expand_shorthands == nil then + options = options or {} + options.expand_shorthands = true + end row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -812,6 +820,8 @@ local function generate_foreign_key_methods(schema) return nil, tostring(err_t), err_t end + options = options or {} + options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -862,6 +872,8 @@ local function generate_foreign_key_methods(schema) end local ws_id = row.ws_id + options = options or {} + options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1389,6 +1401,10 @@ function DAO:select_by_cache_key(cache_key, options) local err local ws_id = row.ws_id + if options == nil or options.expand_shorthands == nil then + options = options or {} + options.expand_shorthands = true + end row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t diff --git a/kong/db/schema/init.lua b/kong/db/schema/init.lua index 2d241ebb200..86e8f88fe21 100644 --- a/kong/db/schema/init.lua +++ b/kong/db/schema/init.lua @@ -1681,7 +1681,7 @@ function Schema:process_auto_fields(data, context, nulls, opts) end end - if is_select and sdata.translate_backwards and opts.expand_shorthands then + if is_select and sdata.translate_backwards and opts and opts.expand_shorthands then data[sname] = utils.table_path(data, sdata.translate_backwards) end end diff --git a/spec/03-plugins/23-rate-limiting/06-shorthand_fields_spec.lua b/spec/03-plugins/23-rate-limiting/06-shorthand_fields_spec.lua index b279e62eeaf..6fff6ee1f70 100644 --- a/spec/03-plugins/23-rate-limiting/06-shorthand_fields_spec.lua +++ b/spec/03-plugins/23-rate-limiting/06-shorthand_fields_spec.lua @@ -153,6 +153,7 @@ describe("Plugin: rate-limiting (shorthand fields)", function() local routes_count = 100 for i=1,routes_count do local route = assert(bp.routes:insert { + name = "route-" .. tostring(i), hosts = { "redis" .. tostring(i) .. ".test" }, }) assert(bp.plugins:insert { @@ -221,5 +222,29 @@ describe("Plugin: rate-limiting (shorthand fields)", function() assert_redis_config_same(expected_config, plugin.config) end end) + + it('get plugins by route', function () + local res = assert(admin_client:send { + path = "/routes/route-1/plugins", + query = { size = 50 } + }) + + local json = cjson.decode(assert.res_status(200, res)) + for _,plugin in ipairs(json.data) do + local i = plugin.config.redis.port - redis1_port + local expected_config = { + redis_host = "custom-host" .. tostring(i) .. 
".example.test", + redis_port = redis1_port + i, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example" .. tostring(i) .. ".test", + } + assert_redis_config_same(expected_config, plugin.config) + end + end) end) end) diff --git a/spec/03-plugins/24-response-rate-limiting/06-shorthand_fields_spec.lua b/spec/03-plugins/24-response-rate-limiting/06-shorthand_fields_spec.lua index f506d85ea64..9b6fe34b863 100644 --- a/spec/03-plugins/24-response-rate-limiting/06-shorthand_fields_spec.lua +++ b/spec/03-plugins/24-response-rate-limiting/06-shorthand_fields_spec.lua @@ -157,6 +157,7 @@ describe("Plugin: response-ratelimiting (shorthand fields)", function() local routes_count = 100 for i=1,routes_count do local route = assert(bp.routes:insert { + name = "route-" .. tostring(i), hosts = { "redis" .. tostring(i) .. ".test" }, }) assert(bp.plugins:insert { @@ -229,5 +230,30 @@ describe("Plugin: response-ratelimiting (shorthand fields)", function() assert_redis_config_same(expected_config, plugin.config) end end) + + + it('get plugins by route', function () + local res = assert(admin_client:send { + path = "/routes/route-1/plugins", + query = { size = 50 } + }) + + local json = cjson.decode(assert.res_status(200, res)) + for _,plugin in ipairs(json.data) do + local i = plugin.config.redis.port - redis1_port + local expected_config = { + redis_host = "custom-host" .. tostring(i) .. ".example.test", + redis_port = redis1_port + i, + redis_username = "test1", + redis_password = "testX", + redis_database = 1, + redis_timeout = 1100, + redis_ssl = true, + redis_ssl_verify = true, + redis_server_name = "example" .. tostring(i) .. ".test", + } + assert_redis_config_same(expected_config, plugin.config) + end + end) end) end) From 85f2f1d784513ae9e8021350186a11e3b39e3b62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Wed, 7 Feb 2024 12:47:19 +0100 Subject: [PATCH 28/91] chore(tests): add tests for DAO methods --- kong/db/dao/init.lua | 2 + spec/02-integration/03-db/14-dao_spec.lua | 98 ++++++++++++++++++++++- 2 files changed, 97 insertions(+), 3 deletions(-) diff --git a/kong/db/dao/init.lua b/kong/db/dao/init.lua index 9456dd51a63..9f4f7854597 100644 --- a/kong/db/dao/init.lua +++ b/kong/db/dao/init.lua @@ -503,6 +503,8 @@ local function check_update(self, key, entity, options, name) return nil, nil, tostring(err_t), err_t end + options = options or {} + options.expand_shorthands = false local rbw_entity local err, err_t if name then diff --git a/spec/02-integration/03-db/14-dao_spec.lua b/spec/02-integration/03-db/14-dao_spec.lua index fd922fedd92..6f89834c7a4 100644 --- a/spec/02-integration/03-db/14-dao_spec.lua +++ b/spec/02-integration/03-db/14-dao_spec.lua @@ -1,11 +1,12 @@ local helpers = require "spec.helpers" +local utils = require "kong.tools.utils" local declarative = require "kong.db.declarative" -- Note: include "off" strategy here as well for _, strategy in helpers.all_strategies() do describe("db.dao #" .. 
strategy, function() local bp, db - local consumer, service, plugin, acl + local consumer, service, service2, plugin, plugin2, acl local group = "The A Team" lazy_setup(function() @@ -26,7 +27,12 @@ for _, strategy in helpers.all_strategies() do name = "abc", url = "http://localhost", } - + + service2 = bp.services:insert { + name = "def", + url = "http://2-localhost", + } + plugin = bp.plugins:insert { enabled = true, name = "acl", @@ -35,6 +41,20 @@ for _, strategy in helpers.all_strategies() do allow = { "*" }, }, } + + plugin2 = bp.plugins:insert { + enabled = true, + name = "rate-limiting", + instance_name = 'rate-limiting-instance-1', + service = service, + config = { + minute = 100, + policy = "redis", + redis = { + host = "localhost" + } + }, + } -- Note: bp in off strategy returns service=id instead of a table plugin.service = { id = service.id @@ -81,7 +101,7 @@ for _, strategy in helpers.all_strategies() do it("select_by_cache_key()", function() local cache_key = kong.db.acls:cache_key(consumer.id, group) - + local read_acl, err = kong.db.acls:select_by_cache_key(cache_key) assert.is_nil(err) assert.same(acl, read_acl) @@ -91,6 +111,78 @@ for _, strategy in helpers.all_strategies() do local read_plugin, err = kong.db.plugins:select_by_cache_key(cache_key) assert.is_nil(err) assert.same(plugin, read_plugin) + + cache_key = kong.db.plugins:cache_key("rate-limiting", nil, service.id, nil) + read_plugin, err = kong.db.plugins:select_by_cache_key(cache_key) + assert.is_nil(err) + assert.same(plugin2, read_plugin) + end) + + it("page_for_route", function() + local plugins_for_service, err = kong.db.plugins:page_for_service(service) + assert.is_nil(err) + assert.equal(2, #plugins_for_service) + for _, read_plugin in ipairs(plugins_for_service) do + if read_plugin.name == 'acl' then + assert.same(plugin, read_plugin) + elseif read_plugin.name == 'rate-limiting' then + assert.same(plugin2, read_plugin) + end + end + end) + + it("select_by_instance_name", function() + local read_plugin, err = kong.db.plugins:select_by_instance_name(plugin2.instance_name) + assert.is_nil(err) + assert.same(plugin2, read_plugin) + end) + + it("update_by_instance_name", function() + local newhost = "newhost" + local updated_plugin = utils.cycle_aware_deep_copy(plugin2) + updated_plugin.config.redis.host = newhost + updated_plugin.config.redis_host = newhost + + local read_plugin, err = kong.db.plugins:update_by_instance_name(plugin2.instance_name, updated_plugin) + assert.is_nil(err) + assert.same(updated_plugin, read_plugin) + end) + + it("upsert_by_instance_name", function() + -- existing plugin upsert (update part of upsert) + local newhost = "newhost" + local updated_plugin = utils.cycle_aware_deep_copy(plugin2) + updated_plugin.config.redis.host = newhost + updated_plugin.config.redis_host = newhost + + local read_plugin, err = kong.db.plugins:upsert_by_instance_name(plugin2.instance_name, updated_plugin) + assert.is_nil(err) + assert.same(updated_plugin, read_plugin) + + -- new plugin upsert (insert part of upsert) + local new_plugin_config = { + id = utils.uuid(), + enabled = true, + name = "rate-limiting", + instance_name = 'rate-limiting-instance-2', + service = service2, + config = { + minute = 200, + policy = "redis", + redis = { + host = "new-host-2" + } + }, + } + + local read_plugin, err = kong.db.plugins:upsert_by_instance_name(new_plugin_config.instance_name, new_plugin_config) + assert.is_nil(err) + assert.same(new_plugin_config.id, read_plugin.id) + 
assert.same(new_plugin_config.instance_name, read_plugin.instance_name) + assert.same(new_plugin_config.service.id, read_plugin.service.id) + assert.same(new_plugin_config.config.minute, read_plugin.config.minute) + assert.same(new_plugin_config.config.redis.host, read_plugin.config.redis.host) + assert.same(new_plugin_config.config.redis.host, read_plugin.config.redis_host) -- legacy field is included end) end) end From 703498efc75bd1d30c58d161a1e396e2f83477fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Wed, 7 Feb 2024 12:51:13 +0100 Subject: [PATCH 29/91] chore(dao): refactor expands shorthands check --- kong/db/dao/init.lua | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kong/db/dao/init.lua b/kong/db/dao/init.lua index 9f4f7854597..513735f3495 100644 --- a/kong/db/dao/init.lua +++ b/kong/db/dao/init.lua @@ -712,7 +712,7 @@ local function generate_foreign_key_methods(schema) end local entities, err - if options == nil or options.expand_shorthands == nil then + if options == nil or options.expand_shorthands ~= false then options = options or {} options.expand_shorthands = true end @@ -774,7 +774,7 @@ local function generate_foreign_key_methods(schema) end local err - if options == nil or options.expand_shorthands == nil then + if options == nil or options.expand_shorthands ~= false then options = options or {} options.expand_shorthands = true end @@ -1025,7 +1025,7 @@ function DAO:select(pk_or_entity, options) end local err - if options == nil or options.expand_shorthands == nil then + if options == nil or options.expand_shorthands ~= false then options = options or {} options.expand_shorthands = true end @@ -1082,7 +1082,7 @@ function DAO:page(size, offset, options) end local entities, err - if options == nil or options.expand_shorthands == nil then + if options == nil or options.expand_shorthands ~= false then options = options or {} options.expand_shorthands = true end @@ -1403,7 +1403,7 @@ function DAO:select_by_cache_key(cache_key, options) local err local ws_id = row.ws_id - if options == nil or options.expand_shorthands == nil then + if options == nil or options.expand_shorthands ~= false then options = options or {} options.expand_shorthands = true end From e1eb00f5e90f2d7c8e609d73e240e8fd26f5bbb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Wed, 7 Feb 2024 15:08:31 +0100 Subject: [PATCH 30/91] refactor(dao): move options nil check to the top of the functions --- kong/db/dao/init.lua | 65 +++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 40 deletions(-) diff --git a/kong/db/dao/init.lua b/kong/db/dao/init.lua index 513735f3495..0645b3e879b 100644 --- a/kong/db/dao/init.lua +++ b/kong/db/dao/init.lua @@ -482,15 +482,14 @@ end local function check_update(self, key, entity, options, name) - local transform - if options ~= nil then - local ok, errors = validate_options_value(self, options) - if not ok then - local err_t = self.errors:invalid_options(errors) - return nil, nil, tostring(err_t), err_t - end - transform = options.transform + options = options or {} + local ok, errors = validate_options_value(self, options) + if not ok then + local err_t = self.errors:invalid_options(errors) + return nil, nil, tostring(err_t), err_t end + local transform = options.transform + if transform == nil then transform = true @@ -503,7 +502,6 @@ local function check_update(self, key, entity, options, name) return nil, nil, tostring(err_t), err_t end - options = options or {} 
options.expand_shorthands = false local rbw_entity local err, err_t @@ -686,6 +684,7 @@ local function generate_foreign_key_methods(schema) local page_method_name = "page_for_" .. name methods[page_method_name] = function(self, foreign_key, size, offset, options) + options = options or {} local size, err, err_t = validate_pagination_method(self, field, foreign_key, size, offset, options) if not size then @@ -712,8 +711,7 @@ local function generate_foreign_key_methods(schema) end local entities, err - if options == nil or options.expand_shorthands ~= false then - options = options or {} + if options.expand_shorthands ~= false then options.expand_shorthands = true end entities, err, err_t = self:rows_to_entities(rows, options) @@ -751,6 +749,7 @@ local function generate_foreign_key_methods(schema) if field.unique or schema.endpoint_key == name then methods["select_by_" .. name] = function(self, unique_value, options) + options = options or {} local ok, err, err_t = validate_unique_row_method(self, name, field, unique_value, options) if not ok then return nil, err, err_t @@ -774,8 +773,7 @@ local function generate_foreign_key_methods(schema) end local err - if options == nil or options.expand_shorthands ~= false then - options = options or {} + if options.expand_shorthands ~= false then options.expand_shorthands = true end row, err, err_t = self:row_to_entity(row, options) @@ -795,6 +793,7 @@ local function generate_foreign_key_methods(schema) end methods["update_by_" .. name] = function(self, unique_value, entity, options) + options = options or {} local ok, err, err_t = validate_unique_row_method(self, name, field, unique_value, options) if not ok then return nil, err, err_t @@ -822,7 +821,6 @@ local function generate_foreign_key_methods(schema) return nil, tostring(err_t), err_t end - options = options or {} options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then @@ -843,6 +841,7 @@ local function generate_foreign_key_methods(schema) end methods["upsert_by_" .. 
name] = function(self, unique_value, entity, options) + options = options or {} local ok, err, err_t = validate_unique_row_method(self, name, field, unique_value, options) if not ok then return nil, err, err_t @@ -874,7 +873,6 @@ local function generate_foreign_key_methods(schema) end local ws_id = row.ws_id - options = options or {} options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then @@ -988,11 +986,9 @@ end function DAO:select(pk_or_entity, options) + options = options or {} validate_primary_key_type(pk_or_entity) - - if options ~= nil then - validate_options_type(options) - end + validate_options_type(options) local primary_key = self.schema:extract_pk_values(pk_or_entity) local ok, errors = self.schema:validate_primary_key(primary_key) @@ -1025,8 +1021,7 @@ function DAO:select(pk_or_entity, options) end local err - if options == nil or options.expand_shorthands ~= false then - options = options or {} + if options.expand_shorthands ~= false then options.expand_shorthands = true end row, err, err_t = self:row_to_entity(row, options) @@ -1082,8 +1077,7 @@ function DAO:page(size, offset, options) end local entities, err - if options == nil or options.expand_shorthands ~= false then - options = options or {} + if options.expand_shorthands ~= false then options.expand_shorthands = true end entities, err, err_t = self:rows_to_entities(rows, options) @@ -1148,11 +1142,9 @@ end function DAO:insert(entity, options) + options = options or {} validate_entity_type(entity) - - if options ~= nil then - validate_options_type(options) - end + validate_options_type(options) local entity_to_insert, err, err_t = check_insert(self, entity, options) if not entity_to_insert then @@ -1170,7 +1162,6 @@ function DAO:insert(entity, options) end local ws_id = row.ws_id - options = options or {} options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then @@ -1189,12 +1180,10 @@ end function DAO:update(pk_or_entity, entity, options) + options = options or {} validate_primary_key_type(pk_or_entity) validate_entity_type(entity) - - if options ~= nil then - validate_options_type(options) - end + validate_options_type(options) local primary_key = self.schema:extract_pk_values(pk_or_entity) local ok, errors = self.schema:validate_primary_key(primary_key) @@ -1225,7 +1214,6 @@ function DAO:update(pk_or_entity, entity, options) end local ws_id = row.ws_id - options = options or {} options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then @@ -1244,12 +1232,10 @@ end function DAO:upsert(pk_or_entity, entity, options) + options = options or {} validate_primary_key_type(pk_or_entity) validate_entity_type(entity) - - if options ~= nil then - validate_options_type(options) - end + validate_options_type(options) local primary_key = self.schema:extract_pk_values(pk_or_entity) local ok, errors = self.schema:validate_primary_key(primary_key) @@ -1280,7 +1266,6 @@ function DAO:upsert(pk_or_entity, entity, options) end local ws_id = row.ws_id - options = options or {} options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then @@ -1371,6 +1356,7 @@ end function DAO:select_by_cache_key(cache_key, options) + options = options or {} local ck_definition = self.schema.cache_key if not ck_definition then error("entity does not have a cache_key defined", 2) @@ -1403,8 +1389,7 @@ function DAO:select_by_cache_key(cache_key, options) local err local ws_id = row.ws_id - if 
options == nil or options.expand_shorthands ~= false then - options = options or {} + if options.expand_shorthands ~= false then options.expand_shorthands = true end row, err, err_t = self:row_to_entity(row, options) From 76ac6594a068d2ccb5a45306685d7349dfea40b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Wed, 7 Feb 2024 16:12:50 +0100 Subject: [PATCH 31/91] refactor(dao): chang expand_shorthands to hide_shorthands --- kong/db/dao/init.lua | 30 +++++++----------------------- kong/db/dao/plugins.lua | 3 ++- kong/db/schema/init.lua | 2 +- 3 files changed, 10 insertions(+), 25 deletions(-) diff --git a/kong/db/dao/init.lua b/kong/db/dao/init.lua index 0645b3e879b..c58928dfb31 100644 --- a/kong/db/dao/init.lua +++ b/kong/db/dao/init.lua @@ -502,18 +502,22 @@ local function check_update(self, key, entity, options, name) return nil, nil, tostring(err_t), err_t end - options.expand_shorthands = false local rbw_entity local err, err_t if name then - rbw_entity, err, err_t = self["select_by_" .. name](self, key, options) + options.hide_shorthands = true + rbw_entity, err, err_t = self["select_by_" .. name](self, key, options) + options.hide_shorthands = false else - rbw_entity, err, err_t = self:select(key, options) + options.hide_shorthands = true + rbw_entity, err, err_t = self:select(key, options) + options.hide_shorthands = false end if err then return nil, nil, err, err_t end + if rbw_entity and check_immutable_fields then local ok, errors = self.schema:validate_immutable_fields(entity_to_update, rbw_entity) if not ok then @@ -711,9 +715,6 @@ local function generate_foreign_key_methods(schema) end local entities, err - if options.expand_shorthands ~= false then - options.expand_shorthands = true - end entities, err, err_t = self:rows_to_entities(rows, options) if err then return nil, err, err_t @@ -773,9 +774,6 @@ local function generate_foreign_key_methods(schema) end local err - if options.expand_shorthands ~= false then - options.expand_shorthands = true - end row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -821,7 +819,6 @@ local function generate_foreign_key_methods(schema) return nil, tostring(err_t), err_t end - options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -873,7 +870,6 @@ local function generate_foreign_key_methods(schema) end local ws_id = row.ws_id - options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1021,9 +1017,6 @@ function DAO:select(pk_or_entity, options) end local err - if options.expand_shorthands ~= false then - options.expand_shorthands = true - end row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1077,9 +1070,6 @@ function DAO:page(size, offset, options) end local entities, err - if options.expand_shorthands ~= false then - options.expand_shorthands = true - end entities, err, err_t = self:rows_to_entities(rows, options) if not entities then return nil, err, err_t @@ -1162,7 +1152,6 @@ function DAO:insert(entity, options) end local ws_id = row.ws_id - options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1214,7 +1203,6 @@ function DAO:update(pk_or_entity, entity, options) end local ws_id = row.ws_id - options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1266,7 
+1254,6 @@ function DAO:upsert(pk_or_entity, entity, options) end local ws_id = row.ws_id - options.expand_shorthands = true row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t @@ -1389,9 +1376,6 @@ function DAO:select_by_cache_key(cache_key, options) local err local ws_id = row.ws_id - if options.expand_shorthands ~= false then - options.expand_shorthands = true - end row, err, err_t = self:row_to_entity(row, options) if not row then return nil, err, err_t diff --git a/kong/db/dao/plugins.lua b/kong/db/dao/plugins.lua index d94ff7d1cc2..bdb8e0c37c1 100644 --- a/kong/db/dao/plugins.lua +++ b/kong/db/dao/plugins.lua @@ -90,7 +90,7 @@ end function Plugins:update(primary_key, entity, options) options = options or {} - options.expand_shorthands = false + options.hide_shorthands = true local rbw_entity = self.super.select(self, primary_key, options) -- ignore errors if rbw_entity then entity = self.schema:merge_values(entity, rbw_entity) @@ -100,6 +100,7 @@ function Plugins:update(primary_key, entity, options) return nil, err, err_t end + options.hide_shorthands = false return self.super.update(self, primary_key, entity, options) end diff --git a/kong/db/schema/init.lua b/kong/db/schema/init.lua index 86e8f88fe21..5f2a579d251 100644 --- a/kong/db/schema/init.lua +++ b/kong/db/schema/init.lua @@ -1681,7 +1681,7 @@ function Schema:process_auto_fields(data, context, nulls, opts) end end - if is_select and sdata.translate_backwards and opts and opts.expand_shorthands then + if is_select and sdata.translate_backwards and opts and not(opts.hide_shorthands) then data[sname] = utils.table_path(data, sdata.translate_backwards) end end From a9e94351560b4727f5dd14ab14e8f5919dec76a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Wed, 7 Feb 2024 16:32:22 +0100 Subject: [PATCH 32/91] refactor(dao): remove unnecessary `option = options or {}` guards --- kong/db/dao/init.lua | 30 ++++++++++++++++-------------- kong/db/schema/init.lua | 2 +- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/kong/db/dao/init.lua b/kong/db/dao/init.lua index c58928dfb31..72eb82fbed7 100644 --- a/kong/db/dao/init.lua +++ b/kong/db/dao/init.lua @@ -517,7 +517,6 @@ local function check_update(self, key, entity, options, name) return nil, nil, err, err_t end - if rbw_entity and check_immutable_fields then local ok, errors = self.schema:validate_immutable_fields(entity_to_update, rbw_entity) if not ok then @@ -688,7 +687,6 @@ local function generate_foreign_key_methods(schema) local page_method_name = "page_for_" .. name methods[page_method_name] = function(self, foreign_key, size, offset, options) - options = options or {} local size, err, err_t = validate_pagination_method(self, field, foreign_key, size, offset, options) if not size then @@ -750,7 +748,6 @@ local function generate_foreign_key_methods(schema) if field.unique or schema.endpoint_key == name then methods["select_by_" .. name] = function(self, unique_value, options) - options = options or {} local ok, err, err_t = validate_unique_row_method(self, name, field, unique_value, options) if not ok then return nil, err, err_t @@ -791,7 +788,6 @@ local function generate_foreign_key_methods(schema) end methods["update_by_" .. 
name] = function(self, unique_value, entity, options) - options = options or {} local ok, err, err_t = validate_unique_row_method(self, name, field, unique_value, options) if not ok then return nil, err, err_t @@ -838,7 +834,6 @@ local function generate_foreign_key_methods(schema) end methods["upsert_by_" .. name] = function(self, unique_value, entity, options) - options = options or {} local ok, err, err_t = validate_unique_row_method(self, name, field, unique_value, options) if not ok then return nil, err, err_t @@ -982,9 +977,11 @@ end function DAO:select(pk_or_entity, options) - options = options or {} validate_primary_key_type(pk_or_entity) - validate_options_type(options) + + if options ~= nil then + validate_options_type(options) + end local primary_key = self.schema:extract_pk_values(pk_or_entity) local ok, errors = self.schema:validate_primary_key(primary_key) @@ -1132,9 +1129,11 @@ end function DAO:insert(entity, options) - options = options or {} validate_entity_type(entity) - validate_options_type(options) + + if options ~= nil then + validate_options_type(options) + end local entity_to_insert, err, err_t = check_insert(self, entity, options) if not entity_to_insert then @@ -1169,10 +1168,12 @@ end function DAO:update(pk_or_entity, entity, options) - options = options or {} validate_primary_key_type(pk_or_entity) validate_entity_type(entity) - validate_options_type(options) + + if options ~= nil then + validate_options_type(options) + end local primary_key = self.schema:extract_pk_values(pk_or_entity) local ok, errors = self.schema:validate_primary_key(primary_key) @@ -1220,10 +1221,12 @@ end function DAO:upsert(pk_or_entity, entity, options) - options = options or {} validate_primary_key_type(pk_or_entity) validate_entity_type(entity) - validate_options_type(options) + + if options ~= nil then + validate_options_type(options) + end local primary_key = self.schema:extract_pk_values(pk_or_entity) local ok, errors = self.schema:validate_primary_key(primary_key) @@ -1343,7 +1346,6 @@ end function DAO:select_by_cache_key(cache_key, options) - options = options or {} local ck_definition = self.schema.cache_key if not ck_definition then error("entity does not have a cache_key defined", 2) diff --git a/kong/db/schema/init.lua b/kong/db/schema/init.lua index 5f2a579d251..a910df28a5f 100644 --- a/kong/db/schema/init.lua +++ b/kong/db/schema/init.lua @@ -1681,7 +1681,7 @@ function Schema:process_auto_fields(data, context, nulls, opts) end end - if is_select and sdata.translate_backwards and opts and not(opts.hide_shorthands) then + if is_select and sdata.translate_backwards and not(opts and opts.hide_shorthands) then data[sname] = utils.table_path(data, sdata.translate_backwards) end end From 82d8c7f415dd9521c5baad1a0803c498ba23ba1f Mon Sep 17 00:00:00 2001 From: samugi Date: Mon, 5 Feb 2024 13:18:41 +0100 Subject: [PATCH 33/91] feat(ci): use local build for upgrade tests Before this commit, the upgrade tests used container images for the old and the new Kong versions. The Lua files from the checked-out repository branch were then installed in the new version container. This only worked if the binaries in the container were compatible with the Lua code. This commit changes the upgrade tests so that for the new version, the local build is used instead of a patched container. 
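
A rough sketch of the resulting flow, as wired up by the workflow and script
changes below ($OLD_CONTAINER, $BUILD_ROOT and $TEST are variables those files
already define; steps abbreviated):

    # the "old" Kong version still runs inside its compose container
    docker exec $OLD_CONTAINER kong migrations bootstrap

    # the "new" Kong version is the local build, entered via the venv script
    source $BUILD_ROOT/kong-dev-venv.sh
    kong migrations up
    bin/busted -t new_after_up,all_phases $TEST
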
--- .github/workflows/upgrade-tests.yml | 38 +++++++------------- scripts/upgrade-tests/docker-compose.yml | 33 +++-------------- scripts/upgrade-tests/test-upgrade-path.sh | 42 +++++++++++----------- 3 files changed, 38 insertions(+), 75 deletions(-) diff --git a/.github/workflows/upgrade-tests.yml b/.github/workflows/upgrade-tests.yml index db8c8a2ff90..96effbccc5f 100644 --- a/.github/workflows/upgrade-tests.yml +++ b/.github/workflows/upgrade-tests.yml @@ -25,45 +25,33 @@ concurrency: cancel-in-progress: true env: GH_TOKEN: ${{ github.token }} + BUILD_ROOT: ${{ github.workspace }}/bazel-bin/build jobs: + build: + uses: ./.github/workflows/build.yml + with: + relative-build-root: bazel-bin/build + upgrade-test: name: Run migration tests runs-on: ubuntu-22.04 + needs: build steps: - - name: Install Prerequisites - run: | - sudo apt-get -y update - sudo apt-get -y install ca-certificates curl gnupg lsb-release jq libyaml-dev net-tools - sudo mkdir -p /etc/apt/keyrings - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update - sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin - - name: Clone Source Code uses: actions/checkout@v4 with: fetch-depth: 0 submodules: recursive - - name: Build Debian Package - run: | - make package/deb - mv bazel-bin/pkg/kong.amd64.deb . - - - name: Build Docker Image - uses: docker/build-push-action@v5 + - name: Lookup build cache + id: cache-deps + uses: actions/cache@v3 with: - file: build/dockerfiles/deb.Dockerfile - context: . 
- push: false - tags: "kong-local/kong:latest" - build-args: | - KONG_BASE_IMAGE=ubuntu:22.04 - KONG_ARTIFACT_PATH=./ + path: ${{ env.BUILD_ROOT }} + key: ${{ needs.build.outputs.cache-key }} - name: Run Upgrade Tests run: | - bash ./scripts/upgrade-tests/test-upgrade-path.sh -i kong-local/kong:latest + bash ./scripts/upgrade-tests/test-upgrade-path.sh -i ${{ env.BUILD_ROOT }}/kong-dev-venv.sh diff --git a/scripts/upgrade-tests/docker-compose.yml b/scripts/upgrade-tests/docker-compose.yml index a127a91b011..8cf757006c1 100644 --- a/scripts/upgrade-tests/docker-compose.yml +++ b/scripts/upgrade-tests/docker-compose.yml @@ -13,33 +13,12 @@ services: timeout: 1s retries: 10 environment: - KONG_PG_HOST: db_postgres - KONG_TEST_PG_HOST: db_postgres + KONG_PG_HOST: localhost + KONG_TEST_PG_HOST: localhost volumes: - ../../worktree/${OLD_KONG_VERSION}:/kong restart: on-failure - networks: - upgrade_tests: - - kong_new: - image: ${NEW_KONG_IMAGE} - command: "tail -f /dev/null" - user: root - depends_on: - - db_postgres - healthcheck: - test: ["CMD", "true"] - interval: 1s - timeout: 1s - retries: 10 - environment: - KONG_PG_HOST: db_postgres - KONG_TEST_PG_HOST: db_postgres - volumes: - - ../..:/kong - restart: on-failure - networks: - upgrade_tests: + network_mode: "host" db_postgres: image: postgres:9.5 @@ -55,8 +34,4 @@ services: restart: on-failure stdin_open: true tty: true - networks: - upgrade_tests: - -networks: - upgrade_tests: + network_mode: "host" diff --git a/scripts/upgrade-tests/test-upgrade-path.sh b/scripts/upgrade-tests/test-upgrade-path.sh index 835b264bbca..9f8638d110c 100755 --- a/scripts/upgrade-tests/test-upgrade-path.sh +++ b/scripts/upgrade-tests/test-upgrade-path.sh @@ -2,10 +2,10 @@ # This script runs the database upgrade tests from the # spec/05-migration directory. It uses docker compose to stand up a -# simple environment with postgres database server and -# two Kong nodes. One node contains the oldest supported version, the -# other has the current version of Kong. The testing is then done as -# described in https://docs.google.com/document/d/1Df-iq5tNyuPj1UNG7bkhecisJFPswOfFqlOS3V4wXSc/edit?usp=sharing +# simple environment with postgres database server and a Kong node. +# The node contains the oldest supported version, the current version +# of Kong is accessed via the local virtual environment. The testing is then +# done as described in https://docs.google.com/document/d/1Df-iq5tNyuPj1UNG7bkhecisJFPswOfFqlOS3V4wXSc/edit?usp=sharing # Normally, the testing environment and the git worktree that is # required by this script are removed when the tests have run. By @@ -36,14 +36,14 @@ function get_current_version() { export OLD_KONG_VERSION=2.8.0 export OLD_KONG_IMAGE=kong:$OLD_KONG_VERSION-ubuntu -export NEW_KONG_IMAGE=kong/kong:$(get_current_version kong) +export KONG_PG_HOST=localhost +export KONG_TEST_PG_HOST=localhost function usage() { cat 1>&2 < ] [ ... ] +usage: $0 [ -i ] [ ... ] - must be the name of a kong image to use as the base image for the - new kong version, based on this repository. + Script to source to set up Kong's virtual environment. 
EOF } @@ -58,7 +58,7 @@ set -- $args while :; do case "$1" in -i) - export NEW_KONG_IMAGE=$2 + venv_script=$2 shift shift ;; @@ -82,7 +82,6 @@ COMPOSE="docker compose -p $ENV_PREFIX -f scripts/upgrade-tests/docker-compose.y NETWORK_NAME=$ENV_PREFIX OLD_CONTAINER=$ENV_PREFIX-kong_old-1 -NEW_CONTAINER=$ENV_PREFIX-kong_new-1 function prepare_container() { docker exec $1 apt-get update @@ -97,11 +96,9 @@ function build_containers() { [ -d worktree/$OLD_KONG_VERSION ] || git worktree add worktree/$OLD_KONG_VERSION $OLD_KONG_VERSION $COMPOSE up --wait prepare_container $OLD_CONTAINER - prepare_container $NEW_CONTAINER docker exec -w /kong $OLD_CONTAINER make dev CRYPTO_DIR=/usr/local/kong # Kong version >= 3.3 moved non Bazel-built dev setup to make dev-legacy - docker exec -w /kong $NEW_CONTAINER make dev-legacy CRYPTO_DIR=/usr/local/kong - docker exec ${NEW_CONTAINER} ln -sf /kong/bin/kong /usr/local/bin/kong + make dev-legacy CRYPTO_DIR=/usr/local/kong } function initialize_test_list() { @@ -115,7 +112,7 @@ function initialize_test_list() { docker exec $OLD_CONTAINER kong migrations reset --yes || true docker exec $OLD_CONTAINER kong migrations bootstrap - docker exec $NEW_CONTAINER kong migrations status \ + kong migrations status \ | jq -r '.new_migrations | .[] | (.namespace | gsub("[.]"; "/")) as $namespace | .migrations[] | "\($namespace)/\(.)_spec.lua" | gsub("^kong"; "spec/05-migration")' \ | sort > $all_tests_file ls 2>/dev/null $(cat $all_tests_file) \ @@ -158,7 +155,8 @@ function initialize_test_list() { function run_tests() { # Run the tests - BUSTED="env KONG_DATABASE=$1 KONG_DNS_RESOLVER= KONG_TEST_PG_DATABASE=kong /kong/bin/busted" + BUSTED_ENV="env KONG_DATABASE=$1 KONG_DNS_RESOLVER= KONG_TEST_PG_DATABASE=kong" + shift set $TESTS @@ -173,25 +171,27 @@ function run_tests() { echo Running $TEST echo ">> Setting up tests" - docker exec -w /upgrade-test $OLD_CONTAINER $BUSTED -t setup $TEST + docker exec -w /upgrade-test $OLD_CONTAINER $BUSTED_ENV /kong/bin/busted -t setup $TEST echo ">> Running migrations" - docker exec $NEW_CONTAINER kong migrations up + kong migrations up echo ">> Testing old_after_up,all_phases" - docker exec -w /upgrade-test $OLD_CONTAINER $BUSTED -t old_after_up,all_phases $TEST + docker exec -w /upgrade-test $OLD_CONTAINER $BUSTED_ENV /kong/bin/busted -t old_after_up,all_phases $TEST echo ">> Testing new_after_up,all_phases" - docker exec -w /kong $NEW_CONTAINER $BUSTED -t new_after_up,all_phases $TEST + $BUSTED_ENV bin/busted -t new_after_up,all_phases $TEST echo ">> Finishing migrations" - docker exec $NEW_CONTAINER kong migrations finish + kong migrations finish echo ">> Testing new_after_finish,all_phases" - docker exec -w /kong $NEW_CONTAINER $BUSTED -t new_after_finish,all_phases $TEST + $BUSTED_ENV bin/busted -t new_after_finish,all_phases $TEST done } function cleanup() { git worktree remove worktree/$OLD_KONG_VERSION --force $COMPOSE down + deactivate } +source $venv_script build_containers initialize_test_list run_tests postgres From bc9e00b71fca1aa71dfcfe810c1e0f9764220474 Mon Sep 17 00:00:00 2001 From: samugi Date: Mon, 5 Feb 2024 15:37:42 +0100 Subject: [PATCH 34/91] feat(ci): run upgrade tests for multiple "old" versions Currently only 2.8.0 is used to run migration tests, all the way up to the "new" (current) version. This means that only features that are shared across all versions from "old" to "new" can be tested, e.g. a plugin that is not available in `2.8.0` cannot be configured and used in migration tests. 
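
One way out is to run the whole suite once per supported source version and
let each spec decide whether it applies to the version being upgraded from.
A spec that only makes sense when the "old" node is a 2.8.x release can then
downgrade itself to `pending`; a minimal sketch, mirroring the spec changes
further down in this patch:

    -- skip the spec unless the source version matches what it was written for
    local OLD_KONG_VERSION = os.getenv("OLD_KONG_VERSION")
    local handler = OLD_KONG_VERSION:sub(1, 3) == "2.8" and describe or pending

    handler("http-log plugin migration", function()
      -- test body; only executed when upgrading from a 2.8.x node
    end)
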
This commit introduces a list of "old_versions" and repeats the tests for each. Tests can use the `OLD_KONG_VERSION` environment variable to determine whether they should execute for the current version. --- scripts/upgrade-tests/source-versions | 2 + scripts/upgrade-tests/test-upgrade-path.sh | 51 +++++++++++-------- .../migrations/001_280_to_300_spec.lua | 5 +- .../migrations/001_280_to_300_spec.lua | 7 ++- .../migrations/001_280_to_300_spec.lua | 5 +- 5 files changed, 45 insertions(+), 25 deletions(-) create mode 100644 scripts/upgrade-tests/source-versions diff --git a/scripts/upgrade-tests/source-versions b/scripts/upgrade-tests/source-versions new file mode 100644 index 00000000000..bd9f2571559 --- /dev/null +++ b/scripts/upgrade-tests/source-versions @@ -0,0 +1,2 @@ +2.8.0 +3.4.0 diff --git a/scripts/upgrade-tests/test-upgrade-path.sh b/scripts/upgrade-tests/test-upgrade-path.sh index 9f8638d110c..8144fd9513f 100755 --- a/scripts/upgrade-tests/test-upgrade-path.sh +++ b/scripts/upgrade-tests/test-upgrade-path.sh @@ -23,19 +23,6 @@ set -e trap "echo exiting because of error" 0 -function get_current_version() { - local image_tag=$1 - local version_from_rockspec=$(perl -ne 'print "$1\n" if (/^\s*tag = "(.*)"/)' kong*.rockspec) - if docker pull $image_tag:$version_from_rockspec >/dev/null 2>/dev/null - then - echo $version_from_rockspec-ubuntu - else - echo master-ubuntu - fi -} - -export OLD_KONG_VERSION=2.8.0 -export OLD_KONG_IMAGE=kong:$OLD_KONG_VERSION-ubuntu export KONG_PG_HOST=localhost export KONG_TEST_PG_HOST=localhost @@ -91,13 +78,19 @@ function prepare_container() { } function build_containers() { + # Kong version >= 3.3 moved non Bazel-built dev setup to make dev-legacy + if (( $(echo "$OLD_KONG_VERSION" | sed 's/\.//g') >= 330 )); then + old_make_target="dev-legacy" + else + old_make_target="dev" + fi + echo "Building containers" [ -d worktree/$OLD_KONG_VERSION ] || git worktree add worktree/$OLD_KONG_VERSION $OLD_KONG_VERSION $COMPOSE up --wait prepare_container $OLD_CONTAINER - docker exec -w /kong $OLD_CONTAINER make dev CRYPTO_DIR=/usr/local/kong - # Kong version >= 3.3 moved non Bazel-built dev setup to make dev-legacy + docker exec -w /kong $OLD_CONTAINER make $old_make_target CRYPTO_DIR=/usr/local/kong make dev-legacy CRYPTO_DIR=/usr/local/kong } @@ -155,7 +148,7 @@ function initialize_test_list() { function run_tests() { # Run the tests - BUSTED_ENV="env KONG_DATABASE=$1 KONG_DNS_RESOLVER= KONG_TEST_PG_DATABASE=kong" + BUSTED_ENV="env KONG_DATABASE=$1 KONG_DNS_RESOLVER= KONG_TEST_PG_DATABASE=kong OLD_KONG_VERSION=$OLD_KONG_VERSION" shift @@ -186,15 +179,29 @@ function run_tests() { } function cleanup() { - git worktree remove worktree/$OLD_KONG_VERSION --force + sudo git worktree remove worktree/$OLD_KONG_VERSION --force $COMPOSE down - deactivate } + source $venv_script -build_containers -initialize_test_list -run_tests postgres -[ -z "$UPGRADE_ENV_PREFIX" ] && cleanup + +# Load supported "old" versions to run migration tests against +old_versions=() +mapfile -t old_versions < "scripts/upgrade-tests/source-versions" + +for old_version in "${old_versions[@]}"; do + export OLD_KONG_VERSION=$old_version + export OLD_KONG_IMAGE=kong:$OLD_KONG_VERSION-ubuntu + + echo "Running tests using $OLD_KONG_VERSION as \"old version\" of Kong" + + build_containers + initialize_test_list + run_tests postgres + [ -z "$UPGRADE_ENV_PREFIX" ] && cleanup +done + +deactivate trap "" 0 diff --git a/spec/05-migration/plugins/http-log/migrations/001_280_to_300_spec.lua 
b/spec/05-migration/plugins/http-log/migrations/001_280_to_300_spec.lua index 320b15096fc..1264a2c8f10 100644 --- a/spec/05-migration/plugins/http-log/migrations/001_280_to_300_spec.lua +++ b/spec/05-migration/plugins/http-log/migrations/001_280_to_300_spec.lua @@ -8,7 +8,10 @@ local uh = require "spec.upgrade_helpers" -- to test the migration process. do not change it to use dynamic port. local HTTP_PORT = 29100 -describe("http-log plugin migration", function() +local OLD_KONG_VERSION = os.getenv("OLD_KONG_VERSION") +local handler = OLD_KONG_VERSION:sub(1,3) == "2.8" and describe or pending + +handler("http-log plugin migration", function() local mock lazy_setup(function() assert(uh.start_kong()) diff --git a/spec/05-migration/plugins/post-function/migrations/001_280_to_300_spec.lua b/spec/05-migration/plugins/post-function/migrations/001_280_to_300_spec.lua index dab5fa5583a..ed3fdfb8f92 100644 --- a/spec/05-migration/plugins/post-function/migrations/001_280_to_300_spec.lua +++ b/spec/05-migration/plugins/post-function/migrations/001_280_to_300_spec.lua @@ -1,7 +1,12 @@ local uh = require "spec/upgrade_helpers" -describe("post-function plugin migration", function() + +local OLD_KONG_VERSION = os.getenv("OLD_KONG_VERSION") +local handler = OLD_KONG_VERSION:sub(1,3) == "2.8" and describe or pending + + +handler("post-function plugin migration", function() lazy_setup(function() assert(uh.start_kong()) diff --git a/spec/05-migration/plugins/pre-function/migrations/001_280_to_300_spec.lua b/spec/05-migration/plugins/pre-function/migrations/001_280_to_300_spec.lua index 5b77e3339e9..d4a43838082 100644 --- a/spec/05-migration/plugins/pre-function/migrations/001_280_to_300_spec.lua +++ b/spec/05-migration/plugins/pre-function/migrations/001_280_to_300_spec.lua @@ -1,7 +1,10 @@ local uh = require "spec/upgrade_helpers" -describe("pre-function plugin migration", function() +local OLD_KONG_VERSION = os.getenv("OLD_KONG_VERSION") +local handler = OLD_KONG_VERSION:sub(1,3) == "2.8" and describe or pending + +handler("pre-function plugin migration", function() lazy_setup(function() assert(uh.start_kong()) From d48c63d0cb3a8adc3c55e9343c0e92979562fc79 Mon Sep 17 00:00:00 2001 From: Jun Ouyang Date: Mon, 12 Feb 2024 16:13:46 +0800 Subject: [PATCH 35/91] fix(otel): fix otel sampling mode lua panic bug when http_response_header_for_traceid option enable (#12544) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(otel): fix otel sampling mode lua panic bug when http_response_header_for_traceid option enable * fix(otel): fix code * fix(otel): fix code * fix(otel): fix code --------- Co-authored-by: Hans Hübner --- ...ling-panic-when-header-trace-id-enable.yml | 3 ++ kong/plugins/opentelemetry/handler.lua | 6 ++-- .../37-opentelemetry/05-otelcol_spec.lua | 28 +++++++++++++++++++ 3 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 changelog/unreleased/kong/otel-sampling-panic-when-header-trace-id-enable.yml diff --git a/changelog/unreleased/kong/otel-sampling-panic-when-header-trace-id-enable.yml b/changelog/unreleased/kong/otel-sampling-panic-when-header-trace-id-enable.yml new file mode 100644 index 00000000000..5efdededa3b --- /dev/null +++ b/changelog/unreleased/kong/otel-sampling-panic-when-header-trace-id-enable.yml @@ -0,0 +1,3 @@ +message: "**Opentelemetry**: fix otel sampling mode lua panic bug when http_response_header_for_traceid option enable" +type: bugfix +scope: Plugin diff --git a/kong/plugins/opentelemetry/handler.lua 
b/kong/plugins/opentelemetry/handler.lua index b2f1f7e0db2..a265e57c21f 100644 --- a/kong/plugins/opentelemetry/handler.lua +++ b/kong/plugins/opentelemetry/handler.lua @@ -158,8 +158,10 @@ function OpenTelemetryHandler:header_filter(conf) local root_span = ngx.ctx.KONG_SPANS and ngx.ctx.KONG_SPANS[1] trace_id = root_span and root_span.trace_id end - trace_id = to_hex(trace_id) - kong.response.add_header(conf.http_response_header_for_traceid, trace_id) + if trace_id then + trace_id = to_hex(trace_id) + kong.response.add_header(conf.http_response_header_for_traceid, trace_id) + end end end diff --git a/spec/03-plugins/37-opentelemetry/05-otelcol_spec.lua b/spec/03-plugins/37-opentelemetry/05-otelcol_spec.lua index ca4fb585e38..5a96f3ffd3e 100644 --- a/spec/03-plugins/37-opentelemetry/05-otelcol_spec.lua +++ b/spec/03-plugins/37-opentelemetry/05-otelcol_spec.lua @@ -120,6 +120,34 @@ for _, strategy in helpers.each_strategy() do return #parts > 0 end, 10) end) + + it("send traces with config http_response_header_for_traceid enable and tracing_sampling_rate option", function() + assert(helpers.restart_kong { + database = strategy, + nginx_conf = "spec/fixtures/custom_nginx.template", + plugins = "opentelemetry", + tracing_instrumentations = "all", + tracing_sampling_rate = 0.00005, + }) + + proxy_url = fmt("http://%s:%s", helpers.get_proxy_ip(), helpers.get_proxy_port()) + proxy_url_enable_traceid = fmt("http://%s:%s/enable_response_header_traceid", helpers.get_proxy_ip(), helpers.get_proxy_port()) + + local httpc = http.new() + for i = 1, 100 do + local res, err = httpc:request_uri(proxy_url_enable_traceid) + assert.is_nil(err) + assert.same(200, res.status) + if res.headers["x-trace-id"] then + local trace_id = res.headers["x-trace-id"] + local trace_id_regex = [[^[a-f0-9]{32}$]] + local m = ngx.re.match(trace_id, trace_id_regex, "jo") + assert.True(m ~= nil, "trace_id does not match regex: " .. 
trace_id_regex) + end + end + httpc:close() + end) + end) end) From e1e6071dd2e402b4c2b09cb9afe06b3b0a95d3f6 Mon Sep 17 00:00:00 2001 From: Joshua Schmid Date: Mon, 12 Feb 2024 10:20:40 +0100 Subject: [PATCH 36/91] fix(chore): render description correctly in cherry-picks --- .github/workflows/cherry-picks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cherry-picks.yml b/.github/workflows/cherry-picks.yml index 1510d2cdb21..4886291dae9 100644 --- a/.github/workflows/cherry-picks.yml +++ b/.github/workflows/cherry-picks.yml @@ -37,7 +37,7 @@ jobs: ## Original description - #{pull_description} + ${pull_description} upstream_repo: 'kong/kong-ee' branch_map: |- { From b0940b2b00640ca8d085c9f7a20ced09b398a40f Mon Sep 17 00:00:00 2001 From: samugi Date: Fri, 2 Feb 2024 12:59:32 +0100 Subject: [PATCH 37/91] fix(opentelemetry): increase default queue batch size migration to update the wrongly set default queue batch size to 200 adapt test to run only for 3.x --- kong-3.7.0-0.rockspec | 3 + kong/db/migrations/operations/331_to_332.lua | 68 +++++++++++++++++++ .../migrations/001_331_to_332.lua | 23 +++++++ .../plugins/opentelemetry/migrations/init.lua | 3 + .../migrations/001_331_to_332_spec.lua | 59 ++++++++++++++++ spec/upgrade_helpers.lua | 39 ++++++++++- 6 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 kong/db/migrations/operations/331_to_332.lua create mode 100644 kong/plugins/opentelemetry/migrations/001_331_to_332.lua create mode 100644 kong/plugins/opentelemetry/migrations/init.lua create mode 100644 spec/05-migration/plugins/opentelemetry/migrations/001_331_to_332_spec.lua diff --git a/kong-3.7.0-0.rockspec b/kong-3.7.0-0.rockspec index cca7ee53d66..61fa53a8f27 100644 --- a/kong-3.7.0-0.rockspec +++ b/kong-3.7.0-0.rockspec @@ -293,6 +293,7 @@ build = { ["kong.db.migrations.operations.200_to_210"] = "kong/db/migrations/operations/200_to_210.lua", ["kong.db.migrations.operations.212_to_213"] = "kong/db/migrations/operations/212_to_213.lua", ["kong.db.migrations.operations.280_to_300"] = "kong/db/migrations/operations/280_to_300.lua", + ["kong.db.migrations.operations.331_to_332"] = "kong/db/migrations/operations/331_to_332.lua", ["kong.db.migrations.migrate_path_280_300"] = "kong/db/migrations/migrate_path_280_300.lua", ["kong.db.declarative.migrations"] = "kong/db/declarative/migrations/init.lua", ["kong.db.declarative.migrations.route_path"] = "kong/db/declarative/migrations/route_path.lua", @@ -557,6 +558,8 @@ build = { ["kong.plugins.azure-functions.handler"] = "kong/plugins/azure-functions/handler.lua", ["kong.plugins.azure-functions.schema"] = "kong/plugins/azure-functions/schema.lua", + ["kong.plugins.opentelemetry.migrations"] = "kong/plugins/opentelemetry/migrations/init.lua", + ["kong.plugins.opentelemetry.migrations.001_331_to_332"] = "kong/plugins/opentelemetry/migrations/001_331_to_332.lua", ["kong.plugins.opentelemetry.handler"] = "kong/plugins/opentelemetry/handler.lua", ["kong.plugins.opentelemetry.schema"] = "kong/plugins/opentelemetry/schema.lua", ["kong.plugins.opentelemetry.proto"] = "kong/plugins/opentelemetry/proto.lua", diff --git a/kong/db/migrations/operations/331_to_332.lua b/kong/db/migrations/operations/331_to_332.lua new file mode 100644 index 00000000000..577ec92075c --- /dev/null +++ b/kong/db/migrations/operations/331_to_332.lua @@ -0,0 +1,68 @@ +-- Helper module for 331_to_332 migration operations. 
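+-- (Currently only Postgres operations are defined here; plugin migrations
+-- such as the opentelemetry one below use teardown.fixup_plugin_config.)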
+-- +-- Operations are versioned and specific to a migration so they remain +-- fixed in time and are not modified for use in future migrations. +-- +-- If you want to reuse these operations in a future migration, +-- copy the functions over to a new versioned module. + + +local function render(template, keys) + return (template:gsub("$%(([A-Z_]+)%)", keys)) +end + + +-------------------------------------------------------------------------------- +-- Postgres operations for Workspace migration +-------------------------------------------------------------------------------- + + +local postgres = { + + up = {}, + + teardown = { + + ------------------------------------------------------------------------------ + -- General function to fixup a plugin configuration + fixup_plugin_config = function(_, connector, plugin_name, fixup_fn) + local pgmoon_json = require("pgmoon.json") + local select_plugin = render( + "SELECT id, name, config FROM plugins WHERE name = '$(NAME)'", { + NAME = plugin_name + }) + + local plugins, err = connector:query(select_plugin) + if not plugins then + return nil, err + end + + for _, plugin in ipairs(plugins) do + local fix = fixup_fn(plugin.config) + if fix then + local sql = render( + "UPDATE plugins SET config = $(NEW_CONFIG)::jsonb WHERE id = '$(ID)'", { + NEW_CONFIG = pgmoon_json.encode_json(plugin.config), + ID = plugin.id, + }) + + local _, err = connector:query(sql) + if err then + return nil, err + end + end + end + + return true + end, + }, + +} + + +-------------------------------------------------------------------------------- + + +return { + postgres = postgres, +} diff --git a/kong/plugins/opentelemetry/migrations/001_331_to_332.lua b/kong/plugins/opentelemetry/migrations/001_331_to_332.lua new file mode 100644 index 00000000000..3916fba7203 --- /dev/null +++ b/kong/plugins/opentelemetry/migrations/001_331_to_332.lua @@ -0,0 +1,23 @@ +local operations = require "kong.db.migrations.operations.331_to_332" + + +local function ws_migration_teardown(ops) + return function(connector) + return ops:fixup_plugin_config(connector, "opentelemetry", function(config) + if config.queue.max_batch_size == 1 then + config.queue.max_batch_size = 200 + return true + end + + return false + end) + end +end + + +return { + postgres = { + up = "", + teardown = ws_migration_teardown(operations.postgres.teardown), + }, +} diff --git a/kong/plugins/opentelemetry/migrations/init.lua b/kong/plugins/opentelemetry/migrations/init.lua new file mode 100644 index 00000000000..f6d97d6c4ad --- /dev/null +++ b/kong/plugins/opentelemetry/migrations/init.lua @@ -0,0 +1,3 @@ +return { + "001_331_to_332", +} diff --git a/spec/05-migration/plugins/opentelemetry/migrations/001_331_to_332_spec.lua b/spec/05-migration/plugins/opentelemetry/migrations/001_331_to_332_spec.lua new file mode 100644 index 00000000000..b385c2db05f --- /dev/null +++ b/spec/05-migration/plugins/opentelemetry/migrations/001_331_to_332_spec.lua @@ -0,0 +1,59 @@ + +local cjson = require "cjson" +local uh = require "spec.upgrade_helpers" + + +if uh.database_type() == 'postgres' then + local handler = uh.get_busted_handler("3.3.0", "3.6.0") + handler("opentelemetry plugin migration", function() + lazy_setup(function() + assert(uh.start_kong()) + end) + + lazy_teardown(function () + assert(uh.stop_kong(nil, true)) + end) + + uh.setup(function () + local admin_client = assert(uh.admin_client()) + + local res = assert(admin_client:send { + method = "POST", + path = "/plugins/", + body = { + name = "opentelemetry", + config = 
{ + endpoint = "http://localhost:8080/v1/traces", + } + }, + headers = { + ["Content-Type"] = "application/json" + } + }) + local body = assert.res_status(201, res) + local json = cjson.decode(body) + -- assert that value of old default is 1 + assert.equals(json.config.queue.max_batch_size, 1) + admin_client:close() + end) + + uh.new_after_finish("has updated opentelemetry queue max_batch_size configuration", function () + local admin_client = assert(uh.admin_client()) + local res = assert(admin_client:send { + method = "GET", + path = "/plugins/" + }) + local body = cjson.decode(assert.res_status(200, res)) + assert.equal(1, #body.data) + assert.equal("opentelemetry", body.data[1].name) + local expected_config = { + endpoint = "http://localhost:8080/v1/traces", + queue = { + max_batch_size = 200 + }, + } + assert.partial_match(expected_config, body.data[1].config) + admin_client:close() + end) + end) +end diff --git a/spec/upgrade_helpers.lua b/spec/upgrade_helpers.lua index 00d8a5d45ce..394aa9dfbd0 100644 --- a/spec/upgrade_helpers.lua +++ b/spec/upgrade_helpers.lua @@ -179,6 +179,42 @@ local function all_phases(phrase, f) return it_when("all_phases", phrase, f) end + +--- Get a Busted test handler for migration tests. +-- +-- This convenience function determines the appropriate Busted handler +-- (`busted.describe` or `busted.pending`) based on the "old Kong version" +-- that migrations are running on and the specified version range. +-- +-- @function get_busted_handler +-- @param min_version The minimum Kong version (inclusive) +-- @param max_version The maximum Kong version (inclusive) +-- @return `busted.describe` if Kong's version is within the specified range, +-- `busted.pending` otherwise. +-- @usage +-- local handler = get_busted_handler("3.3.0", "3.6.0") +-- handler("some migration test", function() ... end) +local get_busted_handler +do + local function get_version_num(v1, v2) + if v2 then + assert(#v2 == #v1, string.format("different version format: %s and %s", v1, v2)) + end + return assert(tonumber((v1:gsub("%.", ""))), "invalid version: " .. v1) + end + + function get_busted_handler(min_version, max_version) + local old_version_var = assert(os.getenv("OLD_KONG_VERSION"), "old version not set") + local old_version = string.match(old_version_var, "[^/]+$") + + local old_version_num = get_version_num(old_version) + local min_v_num = min_version and get_version_num(min_version, old_version) or 0 + local max_v_num = max_version and get_version_num(max_version, old_version) or math.huge + + return old_version_num >= min_v_num and old_version_num <= max_v_num and busted.describe or busted.pending + end +end + return { database_type = database_type, get_database = get_database, @@ -192,5 +228,6 @@ return { old_after_up = old_after_up, new_after_up = new_after_up, new_after_finish = new_after_finish, - all_phases = all_phases + all_phases = all_phases, + get_busted_handler = get_busted_handler, } From da61296de0466fc29b8618f18d7b652ebb369ae9 Mon Sep 17 00:00:00 2001 From: samugi Date: Wed, 3 Jan 2024 13:54:43 +0100 Subject: [PATCH 38/91] feat(ci): dynamic test scheduler / balancer This reverts commit e804fd4b10a78df58c758831347cdc5006ff4b0f effectively reapplying 543004ca259c86e463767b17e782f064e43aa6ea. Original commit message: This commit adds an automatic scheduler for running busted tests. It replaces the static, shell script based scheduler by a mechanism that distributes the load onto a number of runners. Each runner gets to work on a portion of the tests that need to be run. 
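As a rough illustration of how such a portion can be chosen (a sketch only, in Lua
with assumed field names; the real scheduler is the JavaScript implementation in the
Kong/gateway-test-scheduler repository mentioned further below), the balancing
described in the next paragraph amounts to a greedy assignment of the slowest spec
files first:

    -- sketch only: assign each spec file to the currently least-loaded runner.
    -- files is assumed to look like { { name = "spec/...", runtime = 12.3 }, ... }
    local function schedule(files, runner_count)
      table.sort(files, function(a, b) return a.runtime > b.runtime end)

      local runners = {}
      for i = 1, runner_count do
        runners[i] = { total = 0, specs = {} }
      end

      for _, file in ipairs(files) do
        -- pick the runner with the smallest accumulated runtime so far
        local target = runners[1]
        for i = 2, runner_count do
          if runners[i].total < target.total then
            target = runners[i]
          end
        end
        target.total = target.total + file.runtime
        target.specs[#target.specs + 1] = file.name
      end

      return runners
    end
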
The scheduler uses historic run time information to distribute the work evenly across runners, with the goal of making them all run for the same amount of time. With the 7 runners configured in the PR, the overall time it takes to run tests is reduced from around 30 minutes to around 11 minutes. Previously, the scheduling for tests was defined by what the run_tests.sh shell script did. This has now changed so that the new JSON file `test_suites.json` is instead used to define the tests that need to run. Like before, each of the test suites can have its own set of environment variables and test exclusions. The test runner has been rewritten in Javascript in order to make it easier to interface with the declarative configuration file and to facilitate reporting and interfacing with busted. It resides in the https://github.com/Kong/gateway-test-scheduler repository and provides its functionality through custom GitHub Actions. A couple of tests had to be changed to isolate them from other tests better. As the tests are no longer run in identical order every time, it has become more important that each test performs any required cleanup before it runs. --- .ci/run_tests.sh | 165 ------------ .ci/test_suites.json | 34 +++ .github/workflows/build_and_test.yml | 247 +++++++----------- .../update-test-runtime-statistics.yml | 35 +++ spec/busted-ci-helper.lua | 54 ++++ spec/busted-log-failed.lua | 33 --- 6 files changed, 216 insertions(+), 352 deletions(-) delete mode 100755 .ci/run_tests.sh create mode 100644 .ci/test_suites.json create mode 100644 .github/workflows/update-test-runtime-statistics.yml create mode 100644 spec/busted-ci-helper.lua delete mode 100644 spec/busted-log-failed.lua diff --git a/.ci/run_tests.sh b/.ci/run_tests.sh deleted file mode 100755 index 55f64dc03dd..00000000000 --- a/.ci/run_tests.sh +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env bash -set -e - -function cyan() { - echo -e "\033[1;36m$*\033[0m" -} - -function red() { - echo -e "\033[1;31m$*\033[0m" -} - -function get_failed { - if [ ! -z "$FAILED_TEST_FILES_FILE" -a -s "$FAILED_TEST_FILES_FILE" ] - then - cat < $FAILED_TEST_FILES_FILE - else - echo "$@" - fi -} - -BUSTED_ARGS="--keep-going -o htest -v --exclude-tags=flaky,ipv6" -if [ ! 
-z "$FAILED_TEST_FILES_FILE" ] -then - BUSTED_ARGS="--helper=spec/busted-log-failed.lua $BUSTED_ARGS" -fi - -if [ "$KONG_TEST_DATABASE" == "postgres" ]; then - export TEST_CMD="bin/busted $BUSTED_ARGS,off" - - psql -v ON_ERROR_STOP=1 -h localhost --username "$KONG_TEST_PG_USER" <<-EOSQL - CREATE user ${KONG_TEST_PG_USER}_ro; - GRANT CONNECT ON DATABASE $KONG_TEST_PG_DATABASE TO ${KONG_TEST_PG_USER}_ro; - \c $KONG_TEST_PG_DATABASE; - GRANT USAGE ON SCHEMA public TO ${KONG_TEST_PG_USER}_ro; - ALTER DEFAULT PRIVILEGES FOR ROLE $KONG_TEST_PG_USER IN SCHEMA public GRANT SELECT ON TABLES TO ${KONG_TEST_PG_USER}_ro; -EOSQL - -elif [ "$KONG_TEST_DATABASE" == "cassandra" ]; then - echo "Cassandra is no longer supported" - exit 1 - -else - export TEST_CMD="bin/busted $BUSTED_ARGS,postgres,db" -fi - -if [ "$TEST_SUITE" == "integration" ]; then - if [[ "$TEST_SPLIT" == first* ]]; then - # GitHub Actions, run first batch of integration tests - files=$(ls -d spec/02-integration/* | sort | grep -v 05-proxy) - files=$(get_failed $files) - eval "$TEST_CMD" $files - - elif [[ "$TEST_SPLIT" == second* ]]; then - # GitHub Actions, run second batch of integration tests - # Note that the split here is chosen carefully to result - # in a similar run time between the two batches, and should - # be adjusted if imbalance become significant in the future - files=$(ls -d spec/02-integration/* | sort | grep 05-proxy) - files=$(get_failed $files) - eval "$TEST_CMD" $files - - else - # Non GitHub Actions - eval "$TEST_CMD" $(get_failed spec/02-integration/) - fi -fi - -if [ "$TEST_SUITE" == "dbless" ]; then - eval "$TEST_CMD" $(get_failed spec/02-integration/02-cmd \ - spec/02-integration/05-proxy \ - spec/02-integration/04-admin_api/02-kong_routes_spec.lua \ - spec/02-integration/04-admin_api/15-off_spec.lua \ - spec/02-integration/08-status_api/01-core_routes_spec.lua \ - spec/02-integration/08-status_api/03-readiness_endpoint_spec.lua \ - spec/02-integration/11-dbless \ - spec/02-integration/20-wasm) -fi -if [ "$TEST_SUITE" == "plugins" ]; then - set +ex - rm -f .failed - - if [[ "$TEST_SPLIT" == first* ]]; then - # GitHub Actions, run first batch of plugin tests - PLUGINS=$(get_failed $(ls -d spec/03-plugins/* | head -n22)) - - elif [[ "$TEST_SPLIT" == second* ]]; then - # GitHub Actions, run second batch of plugin tests - # Note that the split here is chosen carefully to result - # in a similar run time between the two batches, and should - # be adjusted if imbalance become significant in the future - PLUGINS=$(get_failed $(ls -d spec/03-plugins/* | tail -n+23)) - - else - # Non GitHub Actions - PLUGINS=$(get_failed $(ls -d spec/03-plugins/*)) - fi - - for p in $PLUGINS; do - echo - cyan "--------------------------------------" - cyan $(basename $p) - cyan "--------------------------------------" - echo - - $TEST_CMD $p || echo "* $p" >> .failed - - # the suite is run multiple times for plugins: collect partial failures - if [ ! -z "$FAILED_TEST_FILES_FILE" ] - then - cat "$FAILED_TEST_FILES_FILE" >> "$FAILED_TEST_FILES_FILE.tmp" - fi - done - - if [ ! 
-z "$FAILED_TEST_FILES_FILE.tmp" -a -s "$FAILED_TEST_FILES_FILE.tmp" ] - then - mv "$FAILED_TEST_FILES_FILE.tmp" "$FAILED_TEST_FILES_FILE" - fi - - if [[ "$TEST_SPLIT" != first* ]]; then - cat kong-*.rockspec | grep kong- | grep -v zipkin | grep -v sidecar | grep "~" | grep -v kong-prometheus-plugin | while read line ; do - REPOSITORY=`echo $line | sed "s/\"/ /g" | awk -F" " '{print $1}'` - VERSION=`luarocks show $REPOSITORY | grep $REPOSITORY | head -1 | awk -F" " '{print $2}' | cut -f1 -d"-"` - REPOSITORY=`echo $REPOSITORY | sed -e 's/kong-prometheus-plugin/kong-plugin-prometheus/g'` - REPOSITORY=`echo $REPOSITORY | sed -e 's/kong-proxy-cache-plugin/kong-plugin-proxy-cache/g'` - - echo - cyan "--------------------------------------" - cyan $REPOSITORY $VERSION - cyan "--------------------------------------" - echo - - git clone https://github.com/Kong/$REPOSITORY.git --branch $VERSION --single-branch /tmp/test-$REPOSITORY || \ - git clone https://github.com/Kong/$REPOSITORY.git --branch v$VERSION --single-branch /tmp/test-$REPOSITORY - sed -i 's/grpcbin:9000/localhost:15002/g' /tmp/test-$REPOSITORY/spec/*.lua - sed -i 's/grpcbin:9001/localhost:15003/g' /tmp/test-$REPOSITORY/spec/*.lua - cp -R /tmp/test-$REPOSITORY/spec/fixtures/* spec/fixtures/ || true - pushd /tmp/test-$REPOSITORY - luarocks make - popd - - $TEST_CMD /tmp/test-$REPOSITORY/spec/ || echo "* $REPOSITORY" >> .failed - - done - fi - - if [ -f .failed ]; then - echo - red "--------------------------------------" - red "Plugin tests failed:" - red "--------------------------------------" - cat .failed - exit 1 - else - exit 0 - fi -fi -if [ "$TEST_SUITE" == "pdk" ]; then - prove -I. -r t -fi -if [ "$TEST_SUITE" == "unit" ]; then - unset KONG_TEST_NGINX_USER KONG_PG_PASSWORD KONG_TEST_PG_PASSWORD - scripts/autodoc - bin/busted -v -o htest spec/01-unit - make lint -fi diff --git a/.ci/test_suites.json b/.ci/test_suites.json new file mode 100644 index 00000000000..eb6b15e5909 --- /dev/null +++ b/.ci/test_suites.json @@ -0,0 +1,34 @@ +[ + { + "name": "unit", + "exclude_tags": "flaky,ipv6", + "specs": ["spec/01-unit/"] + }, + { + "name": "integration", + "exclude_tags": "flaky,ipv6,off", + "environment": { + "KONG_TEST_DATABASE": "postgres" + }, + "specs": ["spec/02-integration/"] + }, + { + "name": "dbless", + "exclude_tags": "flaky,ipv6,postgres,db", + "specs": [ + "spec/02-integration/02-cmd/", + "spec/02-integration/05-proxy/", + "spec/02-integration/04-admin_api/02-kong_routes_spec.lua", + "spec/02-integration/04-admin_api/15-off_spec.lua", + "spec/02-integration/08-status_api/01-core_routes_spec.lua", + "spec/02-integration/08-status_api/03-readiness_endpoint_spec.lua", + "spec/02-integration/11-dbless/", + "spec/02-integration/20-wasm/" + ] + }, + { + "name": "plugins", + "exclude_tags": "flaky,ipv6", + "specs": ["spec/03-plugins/"] + } +] diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 9ad8a072ebb..1aa7fc23a58 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -33,6 +33,7 @@ concurrency: env: BUILD_ROOT: ${{ github.workspace }}/bazel-bin/build KONG_TEST_COVERAGE: ${{ inputs.coverage == true || github.event_name == 'schedule' }} + RUNNER_COUNT: 7 jobs: build: @@ -40,22 +41,11 @@ jobs: with: relative-build-root: bazel-bin/build - lint-doc-and-unit-tests: - name: Lint, Doc and Unit tests + lint-and-doc-tests: + name: Lint and Doc tests runs-on: ubuntu-22.04 needs: build - services: - postgres: - image: postgres:13 - env: - 
POSTGRES_USER: kong - POSTGRES_DB: kong - POSTGRES_HOST_AUTH_METHOD: trust - ports: - - 5432:5432 - options: --health-cmd pg_isready --health-interval 5s --health-timeout 5s --health-retries 8 - steps: - name: Bump max open files run: | @@ -100,41 +90,56 @@ jobs: - name: Check labeler configuration run: scripts/check-labeler.pl .github/labeler.yml - - name: Unit tests - env: - KONG_TEST_PG_DATABASE: kong - KONG_TEST_PG_USER: kong - run: | - source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh - TEST_CMD="bin/busted -v -o htest spec/01-unit" - if [[ $KONG_TEST_COVERAGE = true ]]; then - TEST_CMD="$TEST_CMD --coverage" - fi - $TEST_CMD + schedule: + name: Schedule busted tests to run + runs-on: ubuntu-22.04 + needs: build - - name: Archive coverage stats file + env: + WORKFLOW_ID: ${{ github.run_id }} + + outputs: + runners: ${{ steps.generate-runner-array.outputs.RUNNERS }} + + steps: + - name: Checkout source code + uses: actions/checkout@v4 + + - name: Download runtimes file + uses: Kong/gh-storage/download@v1 + with: + repo-path: Kong/gateway-action-storage/main/.ci/runtimes.json + + - name: Schedule tests + uses: Kong/gateway-test-scheduler/schedule@b91bd7aec42bd13748652929f087be81d1d40843 # v1 + with: + test-suites-file: .ci/test_suites.json + test-file-runtime-file: .ci/runtimes.json + output-prefix: test-chunk. + runner-count: ${{ env.RUNNER_COUNT }} + + - name: Upload schedule files uses: actions/upload-artifact@v3 - if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} + continue-on-error: true with: - name: luacov-stats-out-${{ github.job }}-${{ github.run_id }} - retention-days: 1 - path: | - luacov.stats.out + name: schedule-test-files + path: test-chunk.* + retention-days: 7 - - name: Get kernel message - if: failure() + - name: Generate runner array + id: generate-runner-array run: | - sudo dmesg -T + echo "RUNNERS=[$(seq -s "," 1 $(( "$RUNNER_COUNT" )))]" >> "$GITHUB_OUTPUT" - integration-tests-postgres: - name: Postgres ${{ matrix.suite }} - ${{ matrix.split }} tests + busted-tests: + name: Busted test runner ${{ matrix.runner }} runs-on: ubuntu-22.04 - needs: build + needs: [build,schedule] + strategy: fail-fast: false matrix: - suite: [integration, plugins] - split: [first, second] + runner: ${{ fromJSON(needs.schedule.outputs.runners) }} services: postgres: @@ -193,7 +198,6 @@ jobs: echo "127.0.0.1 grpcs_2.test" | sudo tee -a /etc/hosts - name: Enable SSL for Redis - if: ${{ matrix.suite == 'plugins' }} run: | docker cp ${{ github.workspace }} kong_redis:/workspace docker cp ${{ github.workspace }}/spec/fixtures/redis/docker-entrypoint.sh kong_redis:/usr/local/bin/docker-entrypoint.sh @@ -216,47 +220,53 @@ jobs: docker logs opentelemetry-collector - name: Install AWS SAM cli tool - if: ${{ matrix.suite == 'plugins' }} run: | curl -L -s -o /tmp/aws-sam-cli.zip https://github.com/aws/aws-sam-cli/releases/latest/download/aws-sam-cli-linux-x86_64.zip unzip -o /tmp/aws-sam-cli.zip -d /tmp/aws-sam-cli sudo /tmp/aws-sam-cli/install --update - - name: Update PATH + - name: Create kong_ro user in Postgres run: | - echo "$BUILD_ROOT/kong-dev/bin" >> $GITHUB_PATH - echo "$BUILD_ROOT/kong-dev/openresty/nginx/sbin" >> $GITHUB_PATH - echo "$BUILD_ROOT/kong-dev/openresty/bin" >> $GITHUB_PATH - - - name: Debug (nginx) - run: | - echo nginx: $(which nginx) - nginx -V 2>&1 | sed -re 's/ --/\n--/g' - ldd $(which nginx) - - - name: Debug (luarocks) - run: | - echo luarocks: $(which luarocks) - luarocks --version - luarocks config + psql -v ON_ERROR_STOP=1 -h localhost 
--username kong <<\EOD + CREATE user kong_ro; + GRANT CONNECT ON DATABASE kong TO kong_ro; + \c kong; + GRANT USAGE ON SCHEMA public TO kong_ro; + ALTER DEFAULT PRIVILEGES FOR ROLE kong IN SCHEMA public GRANT SELECT ON TABLES TO kong_ro; + EOD - name: Tune up postgres max_connections run: | # arm64 runners may use more connections due to more worker cores psql -hlocalhost -Ukong kong -tAc 'alter system set max_connections = 5000;' - - name: Generate test rerun filename + - name: Download test schedule file + uses: actions/download-artifact@v3 + with: + name: schedule-test-files + + - name: Generate helper environment variables run: | - echo FAILED_TEST_FILES_FILE=$(echo '${{ github.run_id }}-${{ matrix.suite }}-${{ matrix.split }}' | tr A-Z a-z | sed -Ee 's/[^a-z0-9]+/-/g').txt >> $GITHUB_ENV + echo FAILED_TEST_FILES_FILE=failed-tests.json >> $GITHUB_ENV + echo TEST_FILE_RUNTIME_FILE=test-runtime.json >> $GITHUB_ENV + - name: Build & install dependencies + run: | + make dev - name: Download test rerun information uses: actions/download-artifact@v3 continue-on-error: true with: - name: ${{ env.FAILED_TEST_FILES_FILE }} + name: test-rerun-info-${{ matrix.runner }} - - name: Tests + - name: Download test runtime statistics from previous runs + uses: actions/download-artifact@v3 + continue-on-error: true + with: + name: test-runtime-statistics-${{ matrix.runner }} + + - name: Run Tests env: KONG_TEST_PG_DATABASE: kong KONG_TEST_PG_USER: kong @@ -264,115 +274,44 @@ jobs: KONG_SPEC_TEST_GRPCBIN_PORT: "15002" KONG_SPEC_TEST_GRPCBIN_SSL_PORT: "15003" KONG_SPEC_TEST_OTELCOL_FILE_EXPORTER_PATH: ${{ github.workspace }}/tmp/otel/file_exporter.json - TEST_SUITE: ${{ matrix.suite }} - TEST_SPLIT: ${{ matrix.split }} - run: | - make dev # required to install other dependencies like bin/grpcurl - source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh - .ci/run_tests.sh + DD_ENV: ci + DD_SERVICE: kong-ce-ci + DD_CIVISIBILITY_MANUAL_API_ENABLED: 1 + DD_CIVISIBILITY_AGENTLESS_ENABLED: true + DD_TRACE_GIT_METADATA_ENABLED: true + DD_API_KEY: ${{ secrets.DATADOG_API_KEY }} + uses: Kong/gateway-test-scheduler/runner@b91bd7aec42bd13748652929f087be81d1d40843 # v1 + with: + tests-to-run-file: test-chunk.${{ matrix.runner }}.json + failed-test-files-file: ${{ env.FAILED_TEST_FILES_FILE }} + test-file-runtime-file: ${{ env.TEST_FILE_RUNTIME_FILE }} + setup-venv: . 
${{ env.BUILD_ROOT }}/kong-dev-venv.sh - name: Upload test rerun information if: always() uses: actions/upload-artifact@v3 with: - name: ${{ env.FAILED_TEST_FILES_FILE }} + name: test-rerun-info-${{ matrix.runner }} path: ${{ env.FAILED_TEST_FILES_FILE }} retention-days: 2 - - name: Archive coverage stats file + - name: Upload test runtime statistics for offline scheduling + if: always() uses: actions/upload-artifact@v3 - if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} with: - name: luacov-stats-out-${{ github.job }}-${{ github.run_id }}-${{ matrix.suite }}-${{ contains(matrix.split, 'first') && '1' || '2' }} - retention-days: 1 - path: | - luacov.stats.out - - - name: Get kernel message - if: failure() - run: | - sudo dmesg -T - - integration-tests-dbless: - name: DB-less integration tests - runs-on: ubuntu-22.04 - needs: build - - services: - grpcbin: - image: kong/grpcbin - ports: - - 15002:9000 - - 15003:9001 - - steps: - - name: Bump max open files - run: | - sudo echo 'kong soft nofile 65536' | sudo tee -a /etc/security/limits.d/kong-ci.conf - sudo echo 'kong hard nofile 65536' | sudo tee -a /etc/security/limits.d/kong-ci.conf - sudo echo "$(whoami) soft nofile 65536" | sudo tee -a /etc/security/limits.d/kong-ci.conf - sudo echo "$(whoami) hard nofile 65536" | sudo tee -a /etc/security/limits.d/kong-ci.conf - - - name: Checkout Kong source code - uses: actions/checkout@v4 - - - name: Lookup build cache - id: cache-deps - uses: actions/cache@v3 - with: - path: ${{ env.BUILD_ROOT }} - key: ${{ needs.build.outputs.cache-key }} - - - name: Build WASM Test Filters - uses: ./.github/actions/build-wasm-test-filters - - - name: Add gRPC test host names - run: | - echo "127.0.0.1 grpcs_1.test" | sudo tee -a /etc/hosts - echo "127.0.0.1 grpcs_2.test" | sudo tee -a /etc/hosts - - - name: Run OpenTelemetry Collector - run: | - mkdir -p ${{ github.workspace }}/tmp/otel - touch ${{ github.workspace }}/tmp/otel/file_exporter.json - sudo chmod 777 -R ${{ github.workspace }}/tmp/otel - docker run -p 4317:4317 -p 4318:4318 -p 55679:55679 \ - -v ${{ github.workspace }}/spec/fixtures/opentelemetry/otelcol.yaml:/etc/otel-collector-config.yaml \ - -v ${{ github.workspace }}/tmp/otel:/etc/otel \ - --name opentelemetry-collector -d \ - otel/opentelemetry-collector-contrib:0.52.0 \ - --config=/etc/otel-collector-config.yaml - sleep 2 - docker logs opentelemetry-collector - - - name: Tests - env: - KONG_TEST_PG_DATABASE: kong - KONG_TEST_PG_USER: kong - KONG_TEST_DATABASE: 'off' - KONG_SPEC_TEST_GRPCBIN_PORT: "15002" - KONG_SPEC_TEST_GRPCBIN_SSL_PORT: "15003" - KONG_SPEC_TEST_OTELCOL_FILE_EXPORTER_PATH: ${{ github.workspace }}/tmp/otel/file_exporter.json - TEST_SUITE: dbless - run: | - make dev # required to install other dependencies like bin/grpcurl - source ${{ env.BUILD_ROOT }}/kong-dev-venv.sh - .ci/run_tests.sh + name: test-runtime-statistics-${{ matrix.runner }} + path: ${{ env.TEST_FILE_RUNTIME_FILE }} + retention-days: 7 - name: Archive coverage stats file uses: actions/upload-artifact@v3 if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} with: - name: luacov-stats-out-${{ github.job }}-${{ github.run_id }} + name: luacov-stats-out-${{ github.job }}-${{ github.run_id }}-${{ matrix.runner }} retention-days: 1 path: | luacov.stats.out - - name: Get kernel message - if: failure() - run: | - sudo dmesg -T - pdk-tests: name: PDK tests runs-on: ubuntu-22.04 @@ -416,7 +355,7 @@ jobs: export PDK_LUACOV=1 fi eval $(perl -I 
$HOME/perl5/lib/perl5/ -Mlocal::lib) - .ci/run_tests.sh + prove -I. -r t - name: Archive coverage stats file uses: actions/upload-artifact@v3 @@ -432,9 +371,9 @@ jobs: run: | sudo dmesg -T - aggregator: - needs: [lint-doc-and-unit-tests,pdk-tests,integration-tests-postgres,integration-tests-dbless] - name: Luacov stats aggregator + cleanup-and-aggregate-stats: + needs: [lint-and-doc-tests,pdk-tests,busted-tests] + name: Cleanup and Luacov stats aggregator if: ${{ always() && (inputs.coverage == true || github.event_name == 'schedule') }} runs-on: ubuntu-22.04 diff --git a/.github/workflows/update-test-runtime-statistics.yml b/.github/workflows/update-test-runtime-statistics.yml new file mode 100644 index 00000000000..43e4017a518 --- /dev/null +++ b/.github/workflows/update-test-runtime-statistics.yml @@ -0,0 +1,35 @@ +name: Update test runtime statistics file for test scheduling +on: + workflow_dispatch: + schedule: + - cron: "1 0 * * SAT" + # push rule below needed for testing only + push: + branches: + - feat/test-run-scheduler + +jobs: + process-statistics: + name: Download statistics from GitHub and combine them + runs-on: ubuntu-22.04 + steps: + - name: Checkout source code + uses: actions/checkout@v4 + with: + token: ${{ secrets.PAT }} + + - name: Process statistics + uses: Kong/gateway-test-scheduler/analyze@b91bd7aec42bd13748652929f087be81d1d40843 # v1 + env: + GITHUB_TOKEN: ${{ secrets.PAT }} + with: + workflow-name: build_and_test.yml + test-file-runtime-file: .ci/runtimes.json + artifact-name-regexp: "^test-runtime-statistics-\\d+$" + + - name: Upload new runtimes file + uses: Kong/gh-storage/upload@v1 + env: + GITHUB_TOKEN: ${{ secrets.PAT }} + with: + repo-path: Kong/gateway-action-storage/main/.ci/runtimes.json diff --git a/spec/busted-ci-helper.lua b/spec/busted-ci-helper.lua new file mode 100644 index 00000000000..a28b2f367ef --- /dev/null +++ b/spec/busted-ci-helper.lua @@ -0,0 +1,54 @@ +-- busted-ci-helper.lua + +local busted = require 'busted' +local cjson = require 'cjson' +local socket_unix = require 'socket.unix' + +local busted_event_path = os.getenv("BUSTED_EVENT_PATH") + +-- Function to recursively copy a table, skipping keys associated with functions +local function copyTable(original, copied) + copied = copied or {} + + for key, value in pairs(original) do + if type(value) == "table" then + copied[key] = copyTable(value, {}) + elseif type(value) ~= "function" then + copied[key] = value + end + end + + return copied +end + +if busted_event_path then + local sock = assert(socket_unix()) + assert(sock:connect(busted_event_path)) + + local events = {{ 'suite', 'reset' }, + { 'suite', 'start' }, + { 'suite', 'end' }, + { 'file', 'start' }, + { 'file', 'end' }, + { 'test', 'start' }, + { 'test', 'end' }, + { 'pending' }, + { 'failure', 'it' }, + { 'error', 'it' }, + { 'failure' }, + { 'error' }} + for _, event in ipairs(events) do + busted.subscribe(event, function (...) + local args = {} + for i, original in ipairs{...} do + if type(original) == "table" then + args[i] = copyTable(original) + elseif type(original) ~= "function" then + args[i] = original + end + end + + sock:send(cjson.encode({ event = event[1] .. (event[2] and ":" .. event[2] or ""), args = args }) .. 
"\n") + end) + end +end diff --git a/spec/busted-log-failed.lua b/spec/busted-log-failed.lua deleted file mode 100644 index 7bfe6804b83..00000000000 --- a/spec/busted-log-failed.lua +++ /dev/null @@ -1,33 +0,0 @@ --- busted-log-failed.lua - --- Log which test files run by busted had failures or errors in a --- file. The file to use for logging is specified in the --- FAILED_TEST_FILES_FILE environment variable. This is used to --- reduce test rerun times for flaky tests. - -local busted = require 'busted' -local failed_files_file = assert(os.getenv("FAILED_TEST_FILES_FILE"), - "FAILED_TEST_FILES_FILE environment variable not set") - -local FAILED_FILES = {} - -busted.subscribe({ 'failure' }, function(element, parent, message, debug) - FAILED_FILES[element.trace.source] = true -end) - -busted.subscribe({ 'error' }, function(element, parent, message, debug) - FAILED_FILES[element.trace.source] = true -end) - -busted.subscribe({ 'suite', 'end' }, function(suite, count, total) - local output = assert(io.open(failed_files_file, "w")) - if next(FAILED_FILES) then - for failed_file in pairs(FAILED_FILES) do - if failed_file:sub(1, 1) == '@' then - failed_file = failed_file:sub(2) - end - assert(output:write(failed_file .. "\n")) - end - end - output:close() -end) From 7c29cec376e92b4cecde70ad922994f5d1d68ac8 Mon Sep 17 00:00:00 2001 From: samugi Date: Mon, 15 Jan 2024 16:16:04 +0100 Subject: [PATCH 39/91] chore(ci): bump scheduler + consistency with EE * bump test scheduler to v3 * apply changes required by v3: pass `xml-output-file` and `setup-venv-path` params to runner * update busted ci helper to be consistent with EE * reintroduce debug steps in build and test workflow --- .ci/test_suites.json | 4 +++ .github/workflows/build_and_test.yml | 30 +++++++++++++++++-- .../update-test-runtime-statistics.yml | 2 +- Makefile | 2 +- spec/busted-ci-helper.lua | 18 +++++++++-- 5 files changed, 48 insertions(+), 8 deletions(-) diff --git a/.ci/test_suites.json b/.ci/test_suites.json index eb6b15e5909..3a15dd205c5 100644 --- a/.ci/test_suites.json +++ b/.ci/test_suites.json @@ -2,6 +2,7 @@ { "name": "unit", "exclude_tags": "flaky,ipv6", + "venv_script": "kong-dev-venv.sh", "specs": ["spec/01-unit/"] }, { @@ -10,11 +11,13 @@ "environment": { "KONG_TEST_DATABASE": "postgres" }, + "venv_script": "kong-dev-venv.sh", "specs": ["spec/02-integration/"] }, { "name": "dbless", "exclude_tags": "flaky,ipv6,postgres,db", + "venv_script": "kong-dev-venv.sh", "specs": [ "spec/02-integration/02-cmd/", "spec/02-integration/05-proxy/", @@ -29,6 +32,7 @@ { "name": "plugins", "exclude_tags": "flaky,ipv6", + "venv_script": "kong-dev-venv.sh", "specs": ["spec/03-plugins/"] } ] diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 1aa7fc23a58..210d7a3b61b 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -111,12 +111,13 @@ jobs: repo-path: Kong/gateway-action-storage/main/.ci/runtimes.json - name: Schedule tests - uses: Kong/gateway-test-scheduler/schedule@b91bd7aec42bd13748652929f087be81d1d40843 # v1 + uses: Kong/gateway-test-scheduler/schedule@69f0c2a562ac44fc3650b8bfa62106b34094b5ce # v3 with: test-suites-file: .ci/test_suites.json test-file-runtime-file: .ci/runtimes.json output-prefix: test-chunk. 
runner-count: ${{ env.RUNNER_COUNT }} + static-mode: ${{ github.run_attempt > 1 }} - name: Upload schedule files uses: actions/upload-artifact@v3 @@ -225,6 +226,24 @@ jobs: unzip -o /tmp/aws-sam-cli.zip -d /tmp/aws-sam-cli sudo /tmp/aws-sam-cli/install --update + - name: Update PATH + run: | + echo "$BUILD_ROOT/kong-dev/bin" >> $GITHUB_PATH + echo "$BUILD_ROOT/kong-dev/openresty/nginx/sbin" >> $GITHUB_PATH + echo "$BUILD_ROOT/kong-dev/openresty/bin" >> $GITHUB_PATH + + - name: Debug (nginx) + run: | + echo nginx: $(which nginx) + nginx -V 2>&1 | sed -re 's/ --/\n--/g' + ldd $(which nginx) + + - name: Debug (luarocks) + run: | + echo luarocks: $(which luarocks) + luarocks --version + luarocks config + - name: Create kong_ro user in Postgres run: | psql -v ON_ERROR_STOP=1 -h localhost --username kong <<\EOD @@ -280,12 +299,12 @@ jobs: DD_CIVISIBILITY_AGENTLESS_ENABLED: true DD_TRACE_GIT_METADATA_ENABLED: true DD_API_KEY: ${{ secrets.DATADOG_API_KEY }} - uses: Kong/gateway-test-scheduler/runner@b91bd7aec42bd13748652929f087be81d1d40843 # v1 + uses: Kong/gateway-test-scheduler/runner@69f0c2a562ac44fc3650b8bfa62106b34094b5ce # v3 with: tests-to-run-file: test-chunk.${{ matrix.runner }}.json failed-test-files-file: ${{ env.FAILED_TEST_FILES_FILE }} test-file-runtime-file: ${{ env.TEST_FILE_RUNTIME_FILE }} - setup-venv: . ${{ env.BUILD_ROOT }}/kong-dev-venv.sh + setup-venv-path: ${{ env.BUILD_ROOT }} - name: Upload test rerun information if: always() @@ -312,6 +331,11 @@ jobs: path: | luacov.stats.out + - name: Get kernel message + if: failure() + run: | + sudo dmesg -T + pdk-tests: name: PDK tests runs-on: ubuntu-22.04 diff --git a/.github/workflows/update-test-runtime-statistics.yml b/.github/workflows/update-test-runtime-statistics.yml index 43e4017a518..928718a5cd1 100644 --- a/.github/workflows/update-test-runtime-statistics.yml +++ b/.github/workflows/update-test-runtime-statistics.yml @@ -19,7 +19,7 @@ jobs: token: ${{ secrets.PAT }} - name: Process statistics - uses: Kong/gateway-test-scheduler/analyze@b91bd7aec42bd13748652929f087be81d1d40843 # v1 + uses: Kong/gateway-test-scheduler/analyze@69f0c2a562ac44fc3650b8bfa62106b34094b5ce # v3 env: GITHUB_TOKEN: ${{ secrets.PAT }} with: diff --git a/Makefile b/Makefile index af0ff49c799..abeac75ec63 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ OS := $(shell uname | awk '{print tolower($$0)}') MACHINE := $(shell uname -m) -DEV_ROCKS = "busted 2.2.0" "busted-htest 1.0.0" "luacheck 1.1.2" "lua-llthreads2 0.1.6" "ldoc 1.5.0" "luacov 0.15.0" +DEV_ROCKS = "busted 2.2.0" "busted-hjtest 0.0.5" "luacheck 1.1.2" "lua-llthreads2 0.1.6" "ldoc 1.5.0" "luacov 0.15.0" WIN_SCRIPTS = "bin/busted" "bin/kong" "bin/kong-health" BUSTED_ARGS ?= -v TEST_CMD ?= bin/busted $(BUSTED_ARGS) diff --git a/spec/busted-ci-helper.lua b/spec/busted-ci-helper.lua index a28b2f367ef..699d894dfa2 100644 --- a/spec/busted-ci-helper.lua +++ b/spec/busted-ci-helper.lua @@ -7,12 +7,22 @@ local socket_unix = require 'socket.unix' local busted_event_path = os.getenv("BUSTED_EVENT_PATH") -- Function to recursively copy a table, skipping keys associated with functions -local function copyTable(original, copied) - copied = copied or {} +local function copyTable(original, copied, cache, max_depth, current_depth) + copied = copied or {} + cache = cache or {} + max_depth = max_depth or 5 + current_depth = current_depth or 1 + + if cache[original] then return cache[original] end + cache[original] = copied for key, value in pairs(original) do if type(value) == "table" then - copied[key] = 
copyTable(value, {}) + if current_depth < max_depth then + copied[key] = copyTable(value, {}, cache, max_depth, current_depth + 1) + end + elseif type(value) == "userdata" then + copied[key] = tostring(value) elseif type(value) ~= "function" then copied[key] = value end @@ -43,6 +53,8 @@ if busted_event_path then for i, original in ipairs{...} do if type(original) == "table" then args[i] = copyTable(original) + elseif type(original) == "userdata" then + args[i] = tostring(original) elseif type(original) ~= "function" then args[i] = original end From b0bce58083d064c60b6cd4b5878efc0fe4c2090d Mon Sep 17 00:00:00 2001 From: samugi Date: Wed, 3 Jan 2024 18:10:43 +0100 Subject: [PATCH 40/91] fix(tests): failures emerged running the scheduler after fixing the test scheduler helper, new failures emerged. This commit fixes them. fix(test-scheduler): pass github token to gh-storage --- .github/workflows/build_and_test.yml | 13 ++------ .../03-consistent_hashing_spec.lua | 1 + spec/02-integration/02-cmd/03-reload_spec.lua | 7 ++-- spec/02-integration/02-cmd/07-health_spec.lua | 1 + spec/02-integration/03-db/01-db_spec.lua | 33 ++++++++++++++----- .../03-db/11-postgres-ro_spec.lua | 14 +++++++- .../08-status_api/04-config_spec.lua | 4 +++ 7 files changed, 51 insertions(+), 22 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 210d7a3b61b..8cb47a16550 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -107,6 +107,8 @@ jobs: - name: Download runtimes file uses: Kong/gh-storage/download@v1 + env: + GITHUB_TOKEN: ${{ secrets.PAT }} with: repo-path: Kong/gateway-action-storage/main/.ci/runtimes.json @@ -206,7 +208,6 @@ jobs: docker logs kong_redis - name: Run OpenTelemetry Collector - if: ${{ matrix.suite == 'plugins' }} run: | mkdir -p ${{ github.workspace }}/tmp/otel touch ${{ github.workspace }}/tmp/otel/file_exporter.json @@ -244,16 +245,6 @@ jobs: luarocks --version luarocks config - - name: Create kong_ro user in Postgres - run: | - psql -v ON_ERROR_STOP=1 -h localhost --username kong <<\EOD - CREATE user kong_ro; - GRANT CONNECT ON DATABASE kong TO kong_ro; - \c kong; - GRANT USAGE ON SCHEMA public TO kong_ro; - ALTER DEFAULT PRIVILEGES FOR ROLE kong IN SCHEMA public GRANT SELECT ON TABLES TO kong_ro; - EOD - - name: Tune up postgres max_connections run: | # arm64 runners may use more connections due to more worker cores diff --git a/spec/01-unit/09-balancer/03-consistent_hashing_spec.lua b/spec/01-unit/09-balancer/03-consistent_hashing_spec.lua index 8e904d730ac..3e756675562 100644 --- a/spec/01-unit/09-balancer/03-consistent_hashing_spec.lua +++ b/spec/01-unit/09-balancer/03-consistent_hashing_spec.lua @@ -8,6 +8,7 @@ assert:set_parameter("TableFormatLevel", 5) -- when displaying tables, set a big local client local targets, balancers +require "spec.helpers" -- initialize db local dns_utils = require "kong.resty.dns.utils" local mocker = require "spec.fixtures.mocker" local utils = require "kong.tools.utils" diff --git a/spec/02-integration/02-cmd/03-reload_spec.lua b/spec/02-integration/02-cmd/03-reload_spec.lua index 2c6464304f6..364a3f57659 100644 --- a/spec/02-integration/02-cmd/03-reload_spec.lua +++ b/spec/02-integration/02-cmd/03-reload_spec.lua @@ -602,6 +602,9 @@ describe("key-auth plugin invalidation on dbless reload #off", function() nginx_conf = "spec/fixtures/custom_nginx.template", })) + -- wait for the worker to be ready + helpers.get_kong_workers(1) + proxy_client = 
helpers.proxy_client() local res = assert(proxy_client:send { method = "GET", @@ -653,6 +656,7 @@ describe("key-auth plugin invalidation on dbless reload #off", function() - key: my-new-key ]], yaml_file) assert(helpers.reload_kong("off", "reload --prefix " .. helpers.test_conf.prefix, { + database = "off", declarative_config = yaml_file, })) @@ -669,8 +673,7 @@ describe("key-auth plugin invalidation on dbless reload #off", function() local body = assert.res_status(200, res) local json = cjson.decode(body) admin_client:close() - assert.same(1, #json.data) - return "my-new-key" == json.data[1].key + return #json.data == 1 and "my-new-key" == json.data[1].key end, 5) helpers.wait_until(function() diff --git a/spec/02-integration/02-cmd/07-health_spec.lua b/spec/02-integration/02-cmd/07-health_spec.lua index 0d035d1b6c5..dd8c69d98db 100644 --- a/spec/02-integration/02-cmd/07-health_spec.lua +++ b/spec/02-integration/02-cmd/07-health_spec.lua @@ -12,6 +12,7 @@ end for _, health_cmd in ipairs({"health", "bin/kong-health"}) do describe("kong health-check: " .. health_cmd, function() lazy_setup(function() + helpers.get_db_utils(nil, {}) -- runs migrations helpers.prepare_prefix() end) lazy_teardown(function() diff --git a/spec/02-integration/03-db/01-db_spec.lua b/spec/02-integration/03-db/01-db_spec.lua index bd368cbeaa7..ea604874ffe 100644 --- a/spec/02-integration/03-db/01-db_spec.lua +++ b/spec/02-integration/03-db/01-db_spec.lua @@ -4,10 +4,26 @@ local utils = require "kong.tools.utils" for _, strategy in helpers.each_strategy() do - local postgres_only = strategy == "postgres" and it or pending - +local postgres_only = strategy == "postgres" and it or pending + + +describe("db_spec [#" .. strategy .. "]", function() + lazy_setup(function() + local _, db = helpers.get_db_utils(strategy, {}) + -- db RO permissions setup + local pg_ro_user = helpers.test_conf.pg_ro_user + local pg_db = helpers.test_conf.pg_database + db:schema_reset() + db.connector:query(string.format("CREATE user %s;", pg_ro_user)) + db.connector:query(string.format([[ + GRANT CONNECT ON DATABASE %s TO %s; + GRANT USAGE ON SCHEMA public TO %s; + ALTER DEFAULT PRIVILEGES FOR ROLE kong IN SCHEMA public GRANT SELECT ON TABLES TO %s; + ]], pg_db, pg_ro_user, pg_ro_user, pg_ro_user)) + helpers.bootstrap_database(db) + end) - describe("kong.db.init [#" .. strategy .. "]", function() + describe("kong.db.init", function() describe(".new()", function() it("errors on invalid arg", function() assert.has_error(function() @@ -103,7 +119,7 @@ for _, strategy in helpers.each_strategy() do end) end) - describe(":init_connector() [#" .. strategy .. "]", function() + describe(":init_connector()", function() it("initializes infos", function() local db, err = DB.new(helpers.test_conf, strategy) @@ -177,7 +193,7 @@ for _, strategy in helpers.each_strategy() do end) - describe(":connect() [#" .. strategy .. "]", function() + describe(":connect()", function() lazy_setup(function() helpers.get_db_utils(strategy, {}) end) @@ -396,7 +412,7 @@ for _, strategy in helpers.each_strategy() do end) end) - describe("#testme :query() [#" .. strategy .. "]", function() + describe("#testme :query()", function() lazy_setup(function() helpers.get_db_utils(strategy, {}) end) @@ -441,7 +457,7 @@ for _, strategy in helpers.each_strategy() do end) end) - describe(":setkeepalive() [#" .. strategy .. 
"]", function() + describe(":setkeepalive()", function() lazy_setup(function() helpers.get_db_utils(strategy, {}) end) @@ -654,7 +670,7 @@ for _, strategy in helpers.each_strategy() do end) - describe(":close() [#" .. strategy .. "]", function() + describe(":close()", function() lazy_setup(function() helpers.get_db_utils(strategy, {}) end) @@ -855,4 +871,5 @@ for _, strategy in helpers.each_strategy() do db:close() end) end) +end) end diff --git a/spec/02-integration/03-db/11-postgres-ro_spec.lua b/spec/02-integration/03-db/11-postgres-ro_spec.lua index c97a6b797b6..30dfcc2670b 100644 --- a/spec/02-integration/03-db/11-postgres-ro_spec.lua +++ b/spec/02-integration/03-db/11-postgres-ro_spec.lua @@ -9,11 +9,23 @@ for _, strategy in helpers.each_strategy() do local proxy_client, admin_client lazy_setup(function() - helpers.get_db_utils(strategy, { + local _, db = helpers.get_db_utils(strategy, { "routes", "services", }) -- runs migrations + -- db RO permissions setup + local pg_ro_user = helpers.test_conf.pg_ro_user + local pg_db = helpers.test_conf.pg_database + db:schema_reset() + db.connector:query(string.format("CREATE user %s;", pg_ro_user)) + db.connector:query(string.format([[ + GRANT CONNECT ON DATABASE %s TO %s; + GRANT USAGE ON SCHEMA public TO %s; + ALTER DEFAULT PRIVILEGES FOR ROLE kong IN SCHEMA public GRANT SELECT ON TABLES TO %s; + ]], pg_db, pg_ro_user, pg_ro_user, pg_ro_user)) + helpers.bootstrap_database(db) + assert(helpers.start_kong({ database = strategy, pg_ro_host = helpers.test_conf.pg_host, diff --git a/spec/02-integration/08-status_api/04-config_spec.lua b/spec/02-integration/08-status_api/04-config_spec.lua index fd1ac14372c..c811505033f 100644 --- a/spec/02-integration/08-status_api/04-config_spec.lua +++ b/spec/02-integration/08-status_api/04-config_spec.lua @@ -3,6 +3,10 @@ local cjson = require "cjson" for _, strategy in helpers.all_strategies() do describe("Status API - with strategy #" .. strategy, function() + lazy_setup(function() + helpers.get_db_utils(nil, {}) -- runs migrations + end) + it("default enable", function() assert.truthy(helpers.kong_exec("start -c spec/fixtures/default_status_listen.conf")) local client = helpers.http_client("127.0.0.1", 8007, 20000) From 246fd3059445d346aa30fb45649e2eb435756157 Mon Sep 17 00:00:00 2001 From: samugi Date: Wed, 3 Jan 2024 13:58:21 +0100 Subject: [PATCH 41/91] fix(ci): test scheduler busted helper We use `busted.subscribe` to override the output handlers with a callback. To implement the mediator pattern, Busted uses [mediator_lua](https://github.com/Olivine-Labs/mediator_lua). The second value returned by the subscription callback is used to decide whether to continue execution of other subscribers. Since we only return `nil`, the test failure was not handled to exit with the right status and failing tests were exiting with `0`. This commit changes the return value of the callback to: `nil, true` so that the original callback is executed to handle the test result and return the correct exit status. --- spec/busted-ci-helper.lua | 1 + 1 file changed, 1 insertion(+) diff --git a/spec/busted-ci-helper.lua b/spec/busted-ci-helper.lua index 699d894dfa2..be9e84ea145 100644 --- a/spec/busted-ci-helper.lua +++ b/spec/busted-ci-helper.lua @@ -61,6 +61,7 @@ if busted_event_path then end sock:send(cjson.encode({ event = event[1] .. (event[2] and ":" .. event[2] or ""), args = args }) .. 
"\n") + return nil, true --continue end) end end From ed3d9051eeffbcd4b2b5eb87e56ae43e4ff75d1c Mon Sep 17 00:00:00 2001 From: Samuele Date: Wed, 14 Feb 2024 20:41:09 +0100 Subject: [PATCH 42/91] chore(ci): re-enable off tests with the scheduler (#12565) --- .ci/test_suites.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.ci/test_suites.json b/.ci/test_suites.json index 3a15dd205c5..d44fa1f6a92 100644 --- a/.ci/test_suites.json +++ b/.ci/test_suites.json @@ -17,6 +17,9 @@ { "name": "dbless", "exclude_tags": "flaky,ipv6,postgres,db", + "environment": { + "KONG_TEST_DATABASE": "off" + }, "venv_script": "kong-dev-venv.sh", "specs": [ "spec/02-integration/02-cmd/", From 1961ac215eea5f155c0f4370f46e367c6649f8bd Mon Sep 17 00:00:00 2001 From: Joshua Schmid Date: Fri, 16 Feb 2024 10:32:14 +0100 Subject: [PATCH 43/91] chore: apply label on failed cherry-pick (#12410) Signed-off-by: Joshua Schmid --- .github/workflows/cherry-picks.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/cherry-picks.yml b/.github/workflows/cherry-picks.yml index 4886291dae9..5d59cc8e34b 100644 --- a/.github/workflows/cherry-picks.yml +++ b/.github/workflows/cherry-picks.yml @@ -27,6 +27,7 @@ jobs: token: ${{ secrets.CHERRY_PICK_TOKEN }} - name: Create backport pull requests uses: jschmid1/cross-repo-cherrypick-action@2d2a475d31b060ac21521b5eda0a78876bbae94e #v1.1.0 + id: cherry_pick with: token: ${{ secrets.CHERRY_PICK_TOKEN }} pull_title: '[cherry-pick -> ${target_branch}] ${pull_title}' @@ -43,3 +44,8 @@ jobs: { "master": "master" } + - name: add label + if: steps.cherry_pick.outputs.was_successful == 'false' + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1.1.0 + with: + labels: incomplete-cherry-pick From 4df8780fd731e95d991ee01013f20a946920a22a Mon Sep 17 00:00:00 2001 From: Joshua Schmid Date: Fri, 16 Feb 2024 10:32:28 +0100 Subject: [PATCH 44/91] chore: apply label on failed backport (#12401) Signed-off-by: Joshua Schmid --- .github/workflows/backport.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 3bac92a1991..97b49acf1b6 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -15,6 +15,7 @@ jobs: - uses: actions/checkout@v4 - name: Create backport pull requests uses: korthout/backport-action@6e72f987c115430f6abc2fa92a74cdbf3e14b956 # v2.4.1 + id: backport with: github_token: ${{ secrets.PAT }} pull_title: '[backport -> ${target_branch}] ${pull_title}' @@ -34,3 +35,8 @@ jobs: { "detect_merge_method": true } + - name: add label + if: steps.backport.outputs.was_successful == 'false' + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1.1.0 + with: + labels: incomplete-backport From 2fb898da9b3de51e894c1336a6598de4d5ebd9f5 Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Fri, 16 Feb 2024 14:06:42 +0200 Subject: [PATCH 45/91] fix(vault): use global query when finding a vault by prefix (#12572) ### Summary In FTI-5762 it was reported that there is a problem with secret rotation when vaults are stored inside a workspace. This commit will fix it by passing `workspace = null` aka making a call a global call which will not then use the possibly incorrect workspace (default) to find vault entity (the vault config). The vault entity prefix is unique across workspaces. 
Signed-off-by: Aapo Talvensaari --- changelog/unreleased/kong/fix-vault-workspaces.yml | 3 +++ kong/pdk/vault.lua | 7 +++++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog/unreleased/kong/fix-vault-workspaces.yml diff --git a/changelog/unreleased/kong/fix-vault-workspaces.yml b/changelog/unreleased/kong/fix-vault-workspaces.yml new file mode 100644 index 00000000000..c381ebcda87 --- /dev/null +++ b/changelog/unreleased/kong/fix-vault-workspaces.yml @@ -0,0 +1,3 @@ +message: "**Vault**: do not use incorrect (default) workspace identifier when retrieving vault entity by prefix" +type: bugfix +scope: Core diff --git a/kong/pdk/vault.lua b/kong/pdk/vault.lua index 81d154b9393..3dbcfe46bf9 100644 --- a/kong/pdk/vault.lua +++ b/kong/pdk/vault.lua @@ -60,6 +60,9 @@ local COLON = byte(":") local SLASH = byte("/") +local VAULT_QUERY_OPTS = { workspace = ngx.null } + + --- -- Checks if the passed in reference looks like a reference. -- Valid references start with '{vault://' and end with '}'. @@ -607,10 +610,10 @@ local function new(self) if cache then local vault_cache_key = vaults:cache_key(prefix) - vault, err = cache:get(vault_cache_key, nil, vaults.select_by_prefix, vaults, prefix) + vault, err = cache:get(vault_cache_key, nil, vaults.select_by_prefix, vaults, prefix, VAULT_QUERY_OPTS) else - vault, err = vaults:select_by_prefix(prefix) + vault, err = vaults:select_by_prefix(prefix, VAULT_QUERY_OPTS) end if not vault then From 84cb1be01d8e9a241e8a2b3afd6d55bb184e605b Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Fri, 16 Feb 2024 14:07:15 +0200 Subject: [PATCH 46/91] chore(conf): enable grpc_ssl_conf_command too (#12548) ### Summary The #12420 by @Water-Melon forgot to add `grpc_ssl_conf_command`. This commit adds that. 
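A minimal usage sketch (the value is only an example; any name/value pair understood
by OpenSSL's SSL_CONF_cmd will do): once the property exists it can be set like the
other injected nginx directives, for instance

    # kong.conf
    nginx_http_grpc_ssl_conf_command = Options PrioritizeChaCha

    # or equivalently via the environment
    KONG_NGINX_HTTP_GRPC_SSL_CONF_COMMAND="Options PrioritizeChaCha" kong start
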
Signed-off-by: Aapo Talvensaari --- kong/conf_loader/parse.lua | 1 + kong/templates/kong_defaults.lua | 1 + kong/templates/nginx_kong.lua | 1 + 3 files changed, 3 insertions(+) diff --git a/kong/conf_loader/parse.lua b/kong/conf_loader/parse.lua index bcdb9f0ff46..a4775b2f670 100644 --- a/kong/conf_loader/parse.lua +++ b/kong/conf_loader/parse.lua @@ -438,6 +438,7 @@ local function check_and_parse(conf, opts) "nginx_http_ssl_conf_command", "nginx_http_proxy_ssl_conf_command", "nginx_http_lua_ssl_conf_command", + "nginx_http_grpc_ssl_conf_command", "nginx_stream_ssl_conf_command", "nginx_stream_proxy_ssl_conf_command", "nginx_stream_lua_ssl_conf_command"}) do diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 5c3931f9592..ef78afcdfe5 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -94,6 +94,7 @@ nginx_http_ssl_session_timeout = NONE nginx_http_ssl_conf_command = NONE nginx_http_proxy_ssl_conf_command = NONE nginx_http_lua_ssl_conf_command = NONE +nginx_http_grpc_ssl_conf_command = NONE nginx_http_lua_regex_match_limit = 100000 nginx_http_lua_regex_cache_max_entries = 8192 nginx_http_keepalive_requests = 10000 diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua index 8cd97849c0e..07526a54a96 100644 --- a/kong/templates/nginx_kong.lua +++ b/kong/templates/nginx_kong.lua @@ -28,6 +28,7 @@ underscores_in_headers on; lua_ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; proxy_ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; +grpc_ssl_conf_command CipherString DEFAULT:@SECLEVEL=0; > end > if ssl_ciphers then ssl_ciphers ${{SSL_CIPHERS}}; From 91ca2cfdda11ef7f9f34ac74b266a803a2da7639 Mon Sep 17 00:00:00 2001 From: Qi Date: Mon, 19 Feb 2024 04:59:08 +0000 Subject: [PATCH 47/91] tests: start mocking server with random port instead of fixed port --- spec/02-integration/07-sdk/03-cluster_spec.lua | 10 ++++++---- .../38-ai-proxy/02-openai_integration_spec.lua | 2 +- .../38-ai-proxy/03-anthropic_integration_spec.lua | 2 +- .../38-ai-proxy/04-cohere_integration_spec.lua | 2 +- .../38-ai-proxy/05-azure_integration_spec.lua | 2 +- .../38-ai-proxy/06-mistral_integration_spec.lua | 2 +- .../38-ai-proxy/07-llama2_integration_spec.lua | 2 +- .../38-ai-proxy/08-encoding_integration_spec.lua | 2 +- .../39-ai-request-transformer/01-transformer_spec.lua | 2 +- .../39-ai-request-transformer/02-integration_spec.lua | 2 +- .../40-ai-response-transformer/02-integration_spec.lua | 2 +- 11 files changed, 16 insertions(+), 14 deletions(-) diff --git a/spec/02-integration/07-sdk/03-cluster_spec.lua b/spec/02-integration/07-sdk/03-cluster_spec.lua index 5f592dd8272..b7af4481cf5 100644 --- a/spec/02-integration/07-sdk/03-cluster_spec.lua +++ b/spec/02-integration/07-sdk/03-cluster_spec.lua @@ -1,4 +1,6 @@ local helpers = require("spec.helpers") +local CP_MOCK_PORT = helpers.get_available_port() +local DP_MOCK_PORT = helpers.get_available_port() local uuid_pattern = "^" .. ("%x"):rep(8) .. "%-" .. ("%x"):rep(4) .. "%-" .. ("%x"):rep(4) .. "%-" .. ("%x"):rep(4) .. "%-" @@ -10,7 +12,7 @@ local fixtures_dp = { fixtures_dp.http_mock.my_server_block = [[ server { server_name my_server; - listen 62349; + listen ]] .. DP_MOCK_PORT .. [[; location = "/hello" { content_by_lua_block { @@ -28,7 +30,7 @@ local fixtures_cp = { fixtures_cp.http_mock.my_server_block = [[ server { server_name my_server; - listen 62350; + listen ]] .. CP_MOCK_PORT .. 
[[; location = "/hello" { content_by_lua_block { @@ -83,7 +85,7 @@ for _, strategy in helpers.each_strategy() do end) it("kong.cluster.get_id() in Hybrid mode", function() - proxy_client = helpers.http_client(helpers.get_proxy_ip(false), 62350) + proxy_client = helpers.http_client(helpers.get_proxy_ip(false), CP_MOCK_PORT) local res = proxy_client:get("/hello") local cp_cluster_id = assert.response(res).has_status(200) @@ -93,7 +95,7 @@ for _, strategy in helpers.each_strategy() do proxy_client:close() helpers.wait_until(function() - proxy_client = helpers.http_client(helpers.get_proxy_ip(false), 62349) + proxy_client = helpers.http_client(helpers.get_proxy_ip(false), DP_MOCK_PORT) local res = proxy_client:get("/hello") local body = assert.response(res).has_status(200) proxy_client:close() diff --git a/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua b/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua index 914bfc9a52b..409ed8096ab 100644 --- a/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua +++ b/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua @@ -3,7 +3,7 @@ local cjson = require "cjson" local pl_file = require "pl.file" local PLUGIN_NAME = "ai-proxy" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then describe(PLUGIN_NAME .. ": (access) [#" .. strategy .. "]", function() diff --git a/spec/03-plugins/38-ai-proxy/03-anthropic_integration_spec.lua b/spec/03-plugins/38-ai-proxy/03-anthropic_integration_spec.lua index a02d77463b3..a9feb38baec 100644 --- a/spec/03-plugins/38-ai-proxy/03-anthropic_integration_spec.lua +++ b/spec/03-plugins/38-ai-proxy/03-anthropic_integration_spec.lua @@ -3,7 +3,7 @@ local cjson = require "cjson" local pl_file = require "pl.file" local PLUGIN_NAME = "ai-proxy" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then describe(PLUGIN_NAME .. ": (access) [#" .. strategy .. "]", function() diff --git a/spec/03-plugins/38-ai-proxy/04-cohere_integration_spec.lua b/spec/03-plugins/38-ai-proxy/04-cohere_integration_spec.lua index cf473505a65..621fbcd786b 100644 --- a/spec/03-plugins/38-ai-proxy/04-cohere_integration_spec.lua +++ b/spec/03-plugins/38-ai-proxy/04-cohere_integration_spec.lua @@ -3,7 +3,7 @@ local cjson = require "cjson" local pl_file = require "pl.file" local PLUGIN_NAME = "ai-proxy" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then describe(PLUGIN_NAME .. ": (access) [#" .. strategy .. "]", function() diff --git a/spec/03-plugins/38-ai-proxy/05-azure_integration_spec.lua b/spec/03-plugins/38-ai-proxy/05-azure_integration_spec.lua index f6aa33efd7a..d976689f92a 100644 --- a/spec/03-plugins/38-ai-proxy/05-azure_integration_spec.lua +++ b/spec/03-plugins/38-ai-proxy/05-azure_integration_spec.lua @@ -3,7 +3,7 @@ local cjson = require "cjson" local pl_file = require "pl.file" local PLUGIN_NAME = "ai-proxy" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then describe(PLUGIN_NAME .. ": (access) [#" .. strategy .. 
"]", function() diff --git a/spec/03-plugins/38-ai-proxy/06-mistral_integration_spec.lua b/spec/03-plugins/38-ai-proxy/06-mistral_integration_spec.lua index 7a82c7614fc..16bcea29ecd 100644 --- a/spec/03-plugins/38-ai-proxy/06-mistral_integration_spec.lua +++ b/spec/03-plugins/38-ai-proxy/06-mistral_integration_spec.lua @@ -3,7 +3,7 @@ local cjson = require "cjson" local pl_file = require "pl.file" local PLUGIN_NAME = "ai-proxy" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then describe(PLUGIN_NAME .. ": (access) [#" .. strategy .. "]", function() diff --git a/spec/03-plugins/38-ai-proxy/07-llama2_integration_spec.lua b/spec/03-plugins/38-ai-proxy/07-llama2_integration_spec.lua index ef0f0172976..b41aaa6e11a 100644 --- a/spec/03-plugins/38-ai-proxy/07-llama2_integration_spec.lua +++ b/spec/03-plugins/38-ai-proxy/07-llama2_integration_spec.lua @@ -3,7 +3,7 @@ local cjson = require "cjson" local pl_file = require "pl.file" local PLUGIN_NAME = "ai-proxy" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then describe(PLUGIN_NAME .. ": (access) [#" .. strategy .. "]", function() diff --git a/spec/03-plugins/38-ai-proxy/08-encoding_integration_spec.lua b/spec/03-plugins/38-ai-proxy/08-encoding_integration_spec.lua index 371f99b11f2..b11c16a973f 100644 --- a/spec/03-plugins/38-ai-proxy/08-encoding_integration_spec.lua +++ b/spec/03-plugins/38-ai-proxy/08-encoding_integration_spec.lua @@ -3,7 +3,7 @@ local cjson = require "cjson" local inflate_gzip = require("kong.tools.gzip").inflate_gzip local PLUGIN_NAME = "ai-proxy" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() local openai_driver = require("kong.llm.drivers.openai") diff --git a/spec/03-plugins/39-ai-request-transformer/01-transformer_spec.lua b/spec/03-plugins/39-ai-request-transformer/01-transformer_spec.lua index 5f4bd4cdc5d..de6b0d25416 100644 --- a/spec/03-plugins/39-ai-request-transformer/01-transformer_spec.lua +++ b/spec/03-plugins/39-ai-request-transformer/01-transformer_spec.lua @@ -2,7 +2,7 @@ local llm_class = require("kong.llm") local helpers = require "spec.helpers" local cjson = require "cjson" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() local PLUGIN_NAME = "ai-request-transformer" local FORMATS = { diff --git a/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua b/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua index 1d0ff2a00ba..7ddedad91fb 100644 --- a/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua +++ b/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua @@ -1,7 +1,7 @@ local helpers = require "spec.helpers" local cjson = require "cjson" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() local PLUGIN_NAME = "ai-request-transformer" local OPENAI_FLAT_RESPONSE = { diff --git a/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua b/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua index 9f724629da9..40c55add51d 100644 --- a/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua +++ b/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua @@ -1,7 +1,7 @@ local helpers = require "spec.helpers" local cjson = require "cjson" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() local PLUGIN_NAME = "ai-response-transformer" 
local OPENAI_INSTRUCTIONAL_RESPONSE = { From 18c1b40970e4bb76b2fcf2c4b156fd13edd663a5 Mon Sep 17 00:00:00 2001 From: Qi Date: Mon, 19 Feb 2024 12:16:41 +0800 Subject: [PATCH 48/91] tests(plugin/ai-response-transformer): replace mocking server by http_mock module --- .../01-transformer_spec.lua | 155 +++++++----------- 1 file changed, 63 insertions(+), 92 deletions(-) diff --git a/spec/03-plugins/40-ai-response-transformer/01-transformer_spec.lua b/spec/03-plugins/40-ai-response-transformer/01-transformer_spec.lua index c13f9dc27ed..6409fbcafef 100644 --- a/spec/03-plugins/40-ai-response-transformer/01-transformer_spec.lua +++ b/spec/03-plugins/40-ai-response-transformer/01-transformer_spec.lua @@ -1,8 +1,10 @@ local llm_class = require("kong.llm") local helpers = require "spec.helpers" local cjson = require "cjson" +local http_mock = require "spec.helpers.http_mock" +local pl_path = require "pl.path" -local MOCK_PORT = 62349 +local MOCK_PORT = helpers.get_available_port() local PLUGIN_NAME = "ai-response-transformer" local OPENAI_INSTRUCTIONAL_RESPONSE = { @@ -13,7 +15,7 @@ local OPENAI_INSTRUCTIONAL_RESPONSE = { options = { max_tokens = 512, temperature = 0.5, - upstream_url = "http://"..helpers.mock_upstream_host..":"..MOCK_PORT.."/instructions" + upstream_url = "http://" .. helpers.mock_upstream_host .. ":" .. MOCK_PORT .. "/instructions" }, }, auth = { @@ -55,98 +57,67 @@ local EXPECTED_RESULT = { } local SYSTEM_PROMPT = "You are a mathematician. " - .. "Multiply all numbers in my JSON request, by 2. Return me this message: " - .. "{\"status\": 400, \"headers: {\"content-type\": \"application/xml\"}, \"body\": \"OUTPUT\"} " - .. "where 'OUTPUT' is the result but transformed into XML format." - - -local client - - -for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then - - describe(PLUGIN_NAME .. ": (unit)", function() - - lazy_setup(function() - -- set up provider fixtures - local fixtures = { - http_mock = {}, - } - - fixtures.http_mock.openai = [[ - server { - server_name llm; - listen ]]..MOCK_PORT..[[; - - default_type 'application/json'; - - location ~/instructions { - content_by_lua_block { - local pl_file = require "pl.file" - ngx.print(pl_file.read("spec/fixtures/ai-proxy/openai/request-transformer/response-with-instructions.json")) - } - } - } - ]] - - -- start kong - assert(helpers.start_kong({ - -- set the strategy - database = strategy, - -- use the custom test template to create a local mock server - nginx_conf = "spec/fixtures/custom_nginx.template", - -- make sure our plugin gets loaded - plugins = "bundled," .. PLUGIN_NAME, - -- write & load declarative config, only if 'strategy=off' - declarative_config = strategy == "off" and helpers.make_yaml_file() or nil, - }, nil, nil, fixtures)) - end) - - lazy_teardown(function() - helpers.stop_kong(nil, true) - end) - - before_each(function() - client = helpers.proxy_client() - end) + .. "Multiply all numbers in my JSON request, by 2. Return me this message: " + .. "{\"status\": 400, \"headers: {\"content-type\": \"application/xml\"}, \"body\": \"OUTPUT\"} " + .. "where 'OUTPUT' is the result but transformed into XML format." + + +describe(PLUGIN_NAME .. 
": (unit)", function() + local mock + local mock_response_file = pl_path.abspath( + "spec/fixtures/ai-proxy/openai/request-transformer/response-with-instructions.json") + + lazy_setup(function() + mock = http_mock.new(tostring(MOCK_PORT), { + ["/instructions"] = { + content = string.format([[ + local pl_file = require "pl.file" + ngx.header["Content-Type"] = "application/json" + ngx.say(pl_file.read("%s")) + ]], mock_response_file), + }, + }, { + hostname = "llm", + }) - after_each(function() - if client then client:close() end - end) + assert(mock:start()) + end) - describe("openai transformer tests, specific response", function() - it("transforms request based on LLM instructions, with response transformation instructions format", function() - local llm = llm_class:new(OPENAI_INSTRUCTIONAL_RESPONSE, {}) - assert.truthy(llm) - - local result, err = llm:ai_introspect_body( - REQUEST_BODY, -- request body - SYSTEM_PROMPT, -- conf.prompt - {}, -- http opts - nil -- transformation extraction pattern (loose json) - ) - - assert.is_nil(err) - - local table_result, err = cjson.decode(result) - assert.is_nil(err) - assert.same(EXPECTED_RESULT, table_result) - - -- parse in response string format - local headers, body, status, err = llm:parse_json_instructions(result) - assert.is_nil(err) - assert.same({ ["content-type"] = "application/xml"}, headers) - assert.same(209, status) - assert.same(EXPECTED_RESULT.body, body) - - -- parse in response table format - headers, body, status, err = llm:parse_json_instructions(table_result) - assert.is_nil(err) - assert.same({ ["content-type"] = "application/xml"}, headers) - assert.same(209, status) - assert.same(EXPECTED_RESULT.body, body) - end) + lazy_teardown(function() + assert(mock:stop()) + end) + describe("openai transformer tests, specific response", function() + it("transforms request based on LLM instructions, with response transformation instructions format", function() + local llm = llm_class:new(OPENAI_INSTRUCTIONAL_RESPONSE, {}) + assert.truthy(llm) + + local result, err = llm:ai_introspect_body( + REQUEST_BODY, -- request body + SYSTEM_PROMPT, -- conf.prompt + {}, -- http opts + nil -- transformation extraction pattern (loose json) + ) + + assert.is_nil(err) + + local table_result, err = cjson.decode(result) + assert.is_nil(err) + assert.same(EXPECTED_RESULT, table_result) + + -- parse in response string format + local headers, body, status, err = llm:parse_json_instructions(result) + assert.is_nil(err) + assert.same({ ["content-type"] = "application/xml" }, headers) + assert.same(209, status) + assert.same(EXPECTED_RESULT.body, body) + + -- parse in response table format + headers, body, status, err = llm:parse_json_instructions(table_result) + assert.is_nil(err) + assert.same({ ["content-type"] = "application/xml" }, headers) + assert.same(209, status) + assert.same(EXPECTED_RESULT.body, body) end) end) -end end +end) From df48729b257e4ccb15d84b4fc428cf7cec38e40d Mon Sep 17 00:00:00 2001 From: Qi Date: Mon, 19 Feb 2024 05:29:18 +0000 Subject: [PATCH 49/91] tests(hybrid): reset and bootstrap DB before starting CP Some tests might change the DB in front of this test, which causes incompatible data to prevent the CP from starting up. 
--- spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua b/spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua index 91ec0eb72a7..43cbf6ec988 100644 --- a/spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua +++ b/spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua @@ -154,7 +154,10 @@ end) describe("when CP exits before DP", function() local need_exit = true - setup(function() + lazy_setup(function() + -- reset and bootstrap DB before starting CP + helpers.get_db_utils(nil) + assert(helpers.start_kong({ role = "control_plane", prefix = "servroot1", @@ -179,7 +182,7 @@ describe("when CP exits before DP", function() })) end) - teardown(function() + lazy_teardown(function() if need_exit then helpers.stop_kong("servroot1") end From acffb9d52ec1ec25a11b80f7e4887b06e8fb38f6 Mon Sep 17 00:00:00 2001 From: Qi Date: Mon, 19 Feb 2024 06:28:43 +0000 Subject: [PATCH 50/91] tests(plugin/ai-request-transformer): replace mocking server by http_mock module --- .../01-transformer_spec.lua | 221 ++++++++---------- 1 file changed, 95 insertions(+), 126 deletions(-) diff --git a/spec/03-plugins/39-ai-request-transformer/01-transformer_spec.lua b/spec/03-plugins/39-ai-request-transformer/01-transformer_spec.lua index de6b0d25416..db1aef512b0 100644 --- a/spec/03-plugins/39-ai-request-transformer/01-transformer_spec.lua +++ b/spec/03-plugins/39-ai-request-transformer/01-transformer_spec.lua @@ -1,6 +1,8 @@ local llm_class = require("kong.llm") local helpers = require "spec.helpers" local cjson = require "cjson" +local http_mock = require "spec.helpers.http_mock" +local pl_path = require "pl.path" local MOCK_PORT = helpers.get_available_port() local PLUGIN_NAME = "ai-request-transformer" @@ -14,7 +16,7 @@ local FORMATS = { options = { max_tokens = 512, temperature = 0.5, - upstream_url = "http://"..helpers.mock_upstream_host..":"..MOCK_PORT.."/chat/openai" + upstream_url = "http://" .. helpers.mock_upstream_host .. ":" .. MOCK_PORT .. "/chat/openai" }, }, auth = { @@ -30,7 +32,7 @@ local FORMATS = { options = { max_tokens = 512, temperature = 0.5, - upstream_url = "http://"..helpers.mock_upstream_host..":"..MOCK_PORT.."/chat/cohere" + upstream_url = "http://" .. helpers.mock_upstream_host .. ":" .. MOCK_PORT .. "/chat/cohere" }, }, auth = { @@ -46,7 +48,7 @@ local FORMATS = { options = { max_tokens = 512, temperature = 0.5, - upstream_url = "http://"..helpers.mock_upstream_host..":"..MOCK_PORT.."/chat/anthropic" + upstream_url = "http://" .. helpers.mock_upstream_host .. ":" .. MOCK_PORT .. "/chat/anthropic" }, }, auth = { @@ -62,7 +64,7 @@ local FORMATS = { options = { max_tokens = 512, temperature = 0.5, - upstream_url = "http://"..helpers.mock_upstream_host..":"..MOCK_PORT.."/chat/azure" + upstream_url = "http://" .. helpers.mock_upstream_host .. ":" .. MOCK_PORT .. "/chat/azure" }, }, auth = { @@ -78,7 +80,7 @@ local FORMATS = { options = { max_tokens = 512, temperature = 0.5, - upstream_url = "http://"..helpers.mock_upstream_host..":"..MOCK_PORT.."/chat/llama2", + upstream_url = "http://" .. helpers.mock_upstream_host .. ":" .. MOCK_PORT .. "/chat/llama2", llama2_format = "raw", }, }, @@ -95,7 +97,7 @@ local FORMATS = { options = { max_tokens = 512, temperature = 0.5, - upstream_url = "http://"..helpers.mock_upstream_host..":"..MOCK_PORT.."/chat/mistral", + upstream_url = "http://" .. helpers.mock_upstream_host .. ":" .. MOCK_PORT .. 
"/chat/mistral", mistral_format = "ollama", }, }, @@ -114,7 +116,7 @@ local OPENAI_NOT_JSON = { options = { max_tokens = 512, temperature = 0.5, - upstream_url = "http://"..helpers.mock_upstream_host..":"..MOCK_PORT.."/not-json" + upstream_url = "http://" .. helpers.mock_upstream_host .. ":" .. MOCK_PORT .. "/not-json" }, }, auth = { @@ -152,131 +154,77 @@ local EXPECTED_RESULT = { } local SYSTEM_PROMPT = "You are a mathematician. " - .. "Multiply all numbers in my JSON request, by 2. Return me the JSON output only" + .. "Multiply all numbers in my JSON request, by 2. Return me the JSON output only" -local client +describe(PLUGIN_NAME .. ": (unit)", function() + local mock + local ai_proxy_fixtures_dir = pl_path.abspath("spec/fixtures/ai-proxy/") + lazy_setup(function() + mock = http_mock.new(MOCK_PORT, { + ["~/chat/(?[a-z0-9]+)"] = { + content = string.format([[ + local base_dir = "%s/" + ngx.header["Content-Type"] = "application/json" -for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then + local pl_file = require "pl.file" + local json = require("cjson.safe") + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) - describe(PLUGIN_NAME .. ": (unit)", function() - - lazy_setup(function() - -- set up provider fixtures - local fixtures = { - http_mock = {}, - } - - fixtures.http_mock.openai = [[ - server { - server_name llm; - listen ]]..MOCK_PORT..[[; - - default_type 'application/json'; - - location ~/chat/(?[a-z0-9]+) { - content_by_lua_block { - local pl_file = require "pl.file" - local json = require("cjson.safe") + local token = ngx.req.get_headers()["authorization"] + local token_query = ngx.req.get_uri_args()["apikey"] + if token == "Bearer " .. ngx.var.provider .. "-key" or token_query == "$1-key" or body.apikey == "$1-key" then ngx.req.read_body() local body, err = ngx.req.get_body_data() body, err = json.decode(body) - local token = ngx.req.get_headers()["authorization"] - local token_query = ngx.req.get_uri_args()["apikey"] + if err or (body.messages == ngx.null) then + ngx.status = 400 + ngx.say(pl_file.read(base_dir .. ngx.var.provider .. "/llm-v1-chat/responses/bad_request.json")) - if token == "Bearer " .. ngx.var.provider .. "-key" or token_query == "$1-key" or body.apikey == "$1-key" then - ngx.req.read_body() - local body, err = ngx.req.get_body_data() - body, err = json.decode(body) - - if err or (body.messages == ngx.null) then - ngx.status = 400 - ngx.print(pl_file.read("spec/fixtures/ai-proxy/" .. ngx.var.provider .. "/llm-v1-chat/responses/bad_request.json")) - else - ngx.status = 200 - ngx.print(pl_file.read("spec/fixtures/ai-proxy/" .. ngx.var.provider .. "/request-transformer/response-in-json.json")) - end else - ngx.status = 401 - ngx.print(pl_file.read("spec/fixtures/ai-proxy/" .. ngx.var.provider .. "/llm-v1-chat/responses/unauthorized.json")) + ngx.status = 200 + ngx.say(pl_file.read(base_dir .. ngx.var.provider .. "/request-transformer/response-in-json.json")) end - } - } - - location ~/not-json { - content_by_lua_block { - local pl_file = require "pl.file" - ngx.print(pl_file.read("spec/fixtures/ai-proxy/openai/request-transformer/response-not-json.json")) - } - } - } - ]] - - -- start kong - assert(helpers.start_kong({ - -- set the strategy - database = strategy, - -- use the custom test template to create a local mock server - nginx_conf = "spec/fixtures/custom_nginx.template", - -- make sure our plugin gets loaded - plugins = "bundled," .. 
PLUGIN_NAME, - -- write & load declarative config, only if 'strategy=off' - declarative_config = strategy == "off" and helpers.make_yaml_file() or nil, - }, nil, nil, fixtures)) - end) - - lazy_teardown(function() - helpers.stop_kong(nil, true) - end) - - before_each(function() - client = helpers.proxy_client() - end) - - after_each(function() - if client then client:close() end - end) - - for name, format_options in pairs(FORMATS) do - - describe(name .. " transformer tests, exact json response", function() - - it("transforms request based on LLM instructions", function() - local llm = llm_class:new(format_options, {}) - assert.truthy(llm) - local result, err = llm:ai_introspect_body( - REQUEST_BODY, -- request body - SYSTEM_PROMPT, -- conf.prompt - {}, -- http opts - nil -- transformation extraction pattern - ) - - assert.is_nil(err) - - result, err = cjson.decode(result) - assert.is_nil(err) + else + ngx.status = 401 + ngx.say(pl_file.read(base_dir .. ngx.var.provider .. "/llm-v1-chat/responses/unauthorized.json")) + end + ]], ai_proxy_fixtures_dir), + }, + ["~/not-json"] = { + content = string.format([[ + local base_dir = "%s/" + local pl_file = require "pl.file" + ngx.header["Content-Type"] = "application/json" + ngx.print(pl_file.read(base_dir .. "openai/request-transformer/response-not-json.json")) + ]], ai_proxy_fixtures_dir), + }, + }) - assert.same(EXPECTED_RESULT, result) - end) - end) + assert(mock:start()) + end) - - end + lazy_teardown(function() + assert(mock:stop()) + end) - describe("openai transformer tests, pattern matchers", function() - it("transforms request based on LLM instructions, with json extraction pattern", function() - local llm = llm_class:new(OPENAI_NOT_JSON, {}) + for name, format_options in pairs(FORMATS) do + describe(name .. 
" transformer tests, exact json response", function() + it("transforms request based on LLM instructions", function() + local llm = llm_class:new(format_options, {}) assert.truthy(llm) local result, err = llm:ai_introspect_body( - REQUEST_BODY, -- request body - SYSTEM_PROMPT, -- conf.prompt - {}, -- http opts - "\\{((.|\n)*)\\}" -- transformation extraction pattern (loose json) + REQUEST_BODY, -- request body + SYSTEM_PROMPT, -- conf.prompt + {}, -- http opts + nil -- transformation extraction pattern ) assert.is_nil(err) @@ -286,22 +234,43 @@ for _, strategy in helpers.all_strategies() do if strategy ~= "cassandra" then assert.same(EXPECTED_RESULT, result) end) + end) + end - it("transforms request based on LLM instructions, but fails to match pattern", function() - local llm = llm_class:new(OPENAI_NOT_JSON, {}) - assert.truthy(llm) + describe("openai transformer tests, pattern matchers", function() + it("transforms request based on LLM instructions, with json extraction pattern", function() + local llm = llm_class:new(OPENAI_NOT_JSON, {}) + assert.truthy(llm) - local result, err = llm:ai_introspect_body( - REQUEST_BODY, -- request body - SYSTEM_PROMPT, -- conf.prompt - {}, -- http opts - "\\#*\\=" -- transformation extraction pattern (loose json) - ) + local result, err = llm:ai_introspect_body( + REQUEST_BODY, -- request body + SYSTEM_PROMPT, -- conf.prompt + {}, -- http opts + "\\{((.|\n)*)\\}" -- transformation extraction pattern (loose json) + ) - assert.is_nil(result) - assert.is_not_nil(err) - assert.same("AI response did not match specified regular expression", err) - end) + assert.is_nil(err) + + result, err = cjson.decode(result) + assert.is_nil(err) + + assert.same(EXPECTED_RESULT, result) end) + + it("transforms request based on LLM instructions, but fails to match pattern", function() + local llm = llm_class:new(OPENAI_NOT_JSON, {}) + assert.truthy(llm) + + local result, err = llm:ai_introspect_body( + REQUEST_BODY, -- request body + SYSTEM_PROMPT, -- conf.prompt + {}, -- http opts + "\\#*\\=" -- transformation extraction pattern (loose json) + ) + + assert.is_nil(result) + assert.is_not_nil(err) + assert.same("AI response did not match specified regular expression", err) + end) -- it end) -end end +end) From 9a7498cda2b01f020a0b7fabd41dcd62c83c8dfb Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Tue, 20 Feb 2024 11:31:54 +0200 Subject: [PATCH 51/91] fix(vault): postpone vault reference resolving on init_worker (#12554) ### Summary It was reported on KAG-2907 that existing LMDB database with secrets can lead to an error when resolving secrets on init worker: ``` resty/http.lua:74: API disabled in the context of init_worker_by_lua* stack traceback: [C]: in function 'co_create' ``` This fixes the issue. 
Signed-off-by: Aapo Talvensaari --- .../unreleased/kong/fix-vault-init-worker.yml | 3 + kong/db/schema/init.lua | 6 +- kong/pdk/vault.lua | 119 ++++++- .../02-cmd/02-start_stop_spec.lua | 327 +++++++++++++++++- 4 files changed, 431 insertions(+), 24 deletions(-) create mode 100644 changelog/unreleased/kong/fix-vault-init-worker.yml diff --git a/changelog/unreleased/kong/fix-vault-init-worker.yml b/changelog/unreleased/kong/fix-vault-init-worker.yml new file mode 100644 index 00000000000..d5315d0d7c2 --- /dev/null +++ b/changelog/unreleased/kong/fix-vault-init-worker.yml @@ -0,0 +1,3 @@ +message: fix vault initialization by postponing vault reference resolving on init_worker +type: bugfix +scope: Core diff --git a/kong/db/schema/init.lua b/kong/db/schema/init.lua index a910df28a5f..89862852ab0 100644 --- a/kong/db/schema/init.lua +++ b/kong/db/schema/init.lua @@ -1778,7 +1778,7 @@ function Schema:process_auto_fields(data, context, nulls, opts) if err then kong.log.warn("unable to resolve reference ", value, " (", err, ")") else - kong.log.warn("unable to resolve reference ", value) + kong.log.notice("unable to resolve reference ", value) end value = "" @@ -1817,7 +1817,7 @@ function Schema:process_auto_fields(data, context, nulls, opts) if err then kong.log.warn("unable to resolve reference ", value[i], " (", err, ")") else - kong.log.warn("unable to resolve reference ", value[i]) + kong.log.notice("unable to resolve reference ", value[i]) end value[i] = "" @@ -1863,7 +1863,7 @@ function Schema:process_auto_fields(data, context, nulls, opts) if err then kong.log.warn("unable to resolve reference ", v, " (", err, ")") else - kong.log.warn("unable to resolve reference ", v) + kong.log.notice("unable to resolve reference ", v) end value[k] = "" diff --git a/kong/pdk/vault.lua b/kong/pdk/vault.lua index 3dbcfe46bf9..347c3d050f8 100644 --- a/kong/pdk/vault.lua +++ b/kong/pdk/vault.lua @@ -199,6 +199,8 @@ local function new(self) local SECRETS_CACHE = ngx.shared.kong_secrets local SECRETS_CACHE_MIN_TTL = ROTATION_INTERVAL * 2 + local INIT_SECRETS = {} + local INIT_WORKER_SECRETS = {} local STRATEGIES = {} local SCHEMAS = {} local CONFIGS = {} @@ -618,7 +620,7 @@ local function new(self) if not vault then if err then - self.log.notice("could not find vault (", prefix, "): ", err) + return nil, fmt("could not find vault (%s): %s", prefix, err) end return nil, fmt("could not find vault (%s)", prefix) @@ -823,10 +825,15 @@ local function new(self) -- If the value is not found in these caches and `cache_only` is not `truthy`, -- it attempts to retrieve the value from a vault. -- + -- On init worker phase the resolving of the secrets is postponed to a timer, + -- and in this case the function returns `""` when it fails to find a value + -- in a cache. This is because of current limitations in platform that disallows + -- using cosockets/coroutines in that phase. 
+ -- -- @local -- @function get -- @tparam string reference the reference key to lookup - -- @tparam boolean cache_only optional boolean flag (if set to `true`, + -- @tparam[opt] boolean cache_only optional boolean flag (if set to `true`, -- the function will not attempt to retrieve the value from the vault) -- @treturn string the retrieved value corresponding to the provided reference, -- or `nil` (when found negatively cached, or in case of an error) @@ -843,19 +850,40 @@ local function new(self) local strategy, err, config, cache_key, parsed_reference = get_strategy(reference) if not strategy then + -- this can fail on init as the lmdb cannot be accessed and secondly, + -- because the data is not yet inserted into LMDB when using KONG_DECLARATIVE_CONFIG. + if get_phase() == "init" then + if not INIT_SECRETS[cache_key] then + INIT_SECRETS[reference] = true + INIT_SECRETS[#INIT_SECRETS + 1] = reference + end + + return "" + end + return nil, err end value = SECRETS_CACHE:get(cache_key) - if cache_only and not value then - return nil, "could not find cached value" - end - if value == NEGATIVELY_CACHED_VALUE then return nil end if not value then + if cache_only then + return nil, "could not find cached value" + end + + -- this can fail on init worker as there is no cosockets / coroutines available + if get_phase() == "init_worker" then + if not INIT_WORKER_SECRETS[cache_key] then + INIT_WORKER_SECRETS[cache_key] = true + INIT_WORKER_SECRETS[#INIT_WORKER_SECRETS + 1] = cache_key + end + + return "" + end + return get_from_vault(reference, strategy, config, cache_key, parsed_reference) end @@ -885,7 +913,7 @@ local function new(self) -- update_from_cache("{vault://env/example}", record, "field" }) local function update_from_cache(reference, record, field) local value, err = get(reference, true) - if not value then + if err then self.log.warn("error updating secret reference ", reference, ": ", err) end @@ -1238,19 +1266,20 @@ local function new(self) --- - -- Function `rotate_secrets` rotates the secrets in the shared dictionary cache (SHDICT). + -- Function `rotate_secrets` rotates the secrets. -- - -- It iterates over all keys in the SHDICT and, if a key corresponds to a reference and the + -- It iterates over all keys in the secrets and, if a key corresponds to a reference and the -- ttl of the key is less than or equal to the resurrection period, it refreshes the value -- associated with the reference. -- -- @local -- @function rotate_secrets - -- @treturn boolean `true` after it has finished iterating over all keys in the SHDICT - local function rotate_secrets() + -- @tparam table secrets the secrets to rotate + -- @treturn boolean `true` after it has finished iterating over all keys in the secrets + local function rotate_secrets(secrets) local phase = get_phase() local caching_strategy = get_caching_strategy() - for _, cache_key in ipairs(SECRETS_CACHE:get_keys(0)) do + for _, cache_key in ipairs(secrets) do yield(true, phase) local ok, err = rotate_secret(cache_key, caching_strategy) @@ -1264,20 +1293,69 @@ local function new(self) --- - -- A recurring secrets rotation timer handler. + -- Function `rotate_secrets_cache` rotates the secrets in the shared dictionary cache. 
+ -- + -- @local + -- @function rotate_secrets_cache + -- @treturn boolean `true` after it has finished iterating over all keys in the shared dictionary cache + local function rotate_secrets_cache() + return rotate_secrets(SECRETS_CACHE:get_keys(0)) + end + + + --- + -- Function `rotate_secrets_init_worker` rotates the secrets in init worker cache + -- + -- On init worker the secret resolving is postponed to a timer because init worker + -- cannot cosockets / coroutines, and there is no other workaround currently. + -- + -- @local + -- @function rotate_secrets_init_worker + -- @treturn boolean `true` after it has finished iterating over all keys in the init worker cache + local function rotate_secrets_init_worker() + local _, err, err2 + if INIT_SECRETS then + _, err = rotate_references(INIT_SECRETS) + end + + if INIT_WORKER_SECRETS then + _, err2 = rotate_secrets(INIT_WORKER_SECRETS) + end + + if err or err2 then + return nil, err or err2 + end + + return true + end + + + --- + -- A secrets rotation timer handler. + -- + -- Uses a node-level mutex to prevent multiple threads/workers running it the same time. -- -- @local -- @function rotate_secrets_timer - -- @tparam boolean premature `true` if server is shutting down. - local function rotate_secrets_timer(premature) + -- @tparam boolean premature `true` if server is shutting down + -- @tparam[opt] boolean init `true` when this is a one of init_worker timer run + -- By default rotates the secrets in shared dictionary cache. + local function rotate_secrets_timer(premature, init) if premature then - return + return true end - local ok, err = concurrency.with_worker_mutex(ROTATION_MUTEX_OPTS, rotate_secrets) + local ok, err = concurrency.with_worker_mutex(ROTATION_MUTEX_OPTS, init and rotate_secrets_init_worker or rotate_secrets_cache) if not ok and err ~= "timeout" then self.log.err("rotating secrets failed (", err, ")") end + + if init then + INIT_SECRETS = nil + INIT_WORKER_SECRETS = nil + end + + return true end @@ -1316,7 +1394,7 @@ local function new(self) -- refresh all the secrets local _, err = self.timer:named_at("secret-rotation-on-crud-event", 0, rotate_secrets_timer) if err then - self.log.err("could not schedule timer to rotate vault secret references: ", err) + self.log.err("could not schedule timer to rotate vault secret references on crud event: ", err) end end @@ -1345,6 +1423,11 @@ local function new(self) if err then self.log.err("could not schedule timer to rotate vault secret references: ", err) end + + local _, err = self.timer:named_at("secret-rotation-on-init", 0, rotate_secrets_timer, true) + if err then + self.log.err("could not schedule timer to rotate vault secret references on init: ", err) + end end diff --git a/spec/02-integration/02-cmd/02-start_stop_spec.lua b/spec/02-integration/02-cmd/02-start_stop_spec.lua index 2c831503a7e..48d0554acba 100644 --- a/spec/02-integration/02-cmd/02-start_stop_spec.lua +++ b/spec/02-integration/02-cmd/02-start_stop_spec.lua @@ -130,6 +130,7 @@ describe("kong start/stop #" .. strategy, function() end) it("resolves referenced secrets", function() + helpers.clean_logfile() helpers.setenv("PG_PASSWORD", "dummy") local _, stderr, stdout = assert(kong_exec("start", { @@ -169,7 +170,7 @@ describe("kong start/stop #" .. 
strategy, function() assert(kong_exec("stop", { prefix = PREFIX })) end) - it("start/stop stops without error when references cannot be resolved #test", function() + it("start/stop stops without error when references cannot be resolved", function() helpers.setenv("PG_PASSWORD", "dummy") local _, stderr, stdout = assert(kong_exec("start", { @@ -226,6 +227,7 @@ describe("kong start/stop #" .. strategy, function() end) it("should not add [emerg], [alert], [crit], [error] or [warn] lines to error log", function() + helpers.clean_logfile() assert(helpers.kong_exec("start ", { prefix = helpers.test_conf.prefix, stream_listen = "127.0.0.1:9022", @@ -634,6 +636,8 @@ describe("kong start/stop #" .. strategy, function() if strategy == "off" then it("does not start with an invalid declarative config file", function() + helpers.clean_logfile() + local yaml_file = helpers.make_yaml_file [[ _format_version: "1.1" services: @@ -665,6 +669,9 @@ describe("kong start/stop #" .. strategy, function() end) it("dbless can reference secrets in declarative configuration", function() + helpers.clean_logfile() + helpers.setenv("SESSION_SECRET", "top-secret-value") + local yaml_file = helpers.make_yaml_file [[ _format_version: "3.0" _transform: true @@ -672,10 +679,11 @@ describe("kong start/stop #" .. strategy, function() - name: session instance_name: session config: - secret: "{vault://mocksocket/test}" + secret: "{vault://mocksocket/session-secret}" ]] finally(function() + helpers.unsetenv("SESSION_SECRET") os.remove(yaml_file) end) @@ -692,12 +700,325 @@ describe("kong start/stop #" .. strategy, function() database = "off", declarative_config = yaml_file, vaults = "mocksocket", - plugins = "session" + plugins = "session", }) + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + assert.truthy(ok) assert.not_matches("error", err) assert.logfile().has.no.line("[error]", true, 0) + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.no.line(" {vault://mocksocket/session-secret}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + assert(helpers.restart_kong({ + database = "off", + vaults = "mocksocket", + plugins = "session", + declarative_config = "", + })) + + assert.logfile().has.no.line("[error]", true, 0) + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.no.line(" {vault://mocksocket/session-secret}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + + assert(helpers.reload_kong("off", "reload --prefix " .. 
helpers.test_conf.prefix, { + database = "off", + vaults = "mocksocket", + plugins = "session", + declarative_config = "", + })) + + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.no.line(" {vault://mocksocket/session-secret}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + + end) + + it("dbless does not fail fatally when referencing secrets doesn't work in declarative configuration", function() + helpers.clean_logfile() + + local yaml_file = helpers.make_yaml_file [[ + _format_version: "3.0" + _transform: true + plugins: + - name: session + instance_name: session + config: + secret: "{vault://mocksocket/session-secret-unknown}" + ]] + + finally(function() + os.remove(yaml_file) + end) + + helpers.setenv("KONG_LUA_PATH_OVERRIDE", "./spec/fixtures/custom_vaults/?.lua;./spec/fixtures/custom_vaults/?/init.lua;;") + helpers.get_db_utils(strategy, { + "vaults", + }, { + "session" + }, { + "mocksocket" + }) + + local ok, err = helpers.start_kong({ + database = "off", + declarative_config = yaml_file, + vaults = "mocksocket", + plugins = "session", + }) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + + assert.truthy(ok) + assert.not_matches("error", err) + assert.logfile().has.no.line("[error]", true, 0) + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.line(" {vault://mocksocket/session-secret-unknown}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + assert(helpers.restart_kong({ + database = "off", + vaults = "mocksocket", + plugins = "session", + declarative_config = "", + })) + + assert.logfile().has.no.line("[error]", true, 0) + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.line(" {vault://mocksocket/session-secret-unknown}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + + assert(helpers.reload_kong("off", "reload --prefix " .. 
helpers.test_conf.prefix, { + database = "off", + vaults = "mocksocket", + plugins = "session", + declarative_config = "", + })) + + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.line(" {vault://mocksocket/session-secret-unknown}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + end) + + it("dbless can reference secrets in declarative configuration using vault entities", function() + helpers.clean_logfile() + helpers.setenv("SESSION_SECRET_AGAIN", "top-secret-value") + + local yaml_file = helpers.make_yaml_file [[ + _format_version: "3.0" + _transform: true + plugins: + - name: session + instance_name: session + config: + secret: "{vault://mock/session-secret-again}" + vaults: + - description: my vault + name: mocksocket + prefix: mock + ]] + + finally(function() + helpers.unsetenv("SESSION_SECRET_AGAIN") + os.remove(yaml_file) + end) + + helpers.setenv("KONG_LUA_PATH_OVERRIDE", "./spec/fixtures/custom_vaults/?.lua;./spec/fixtures/custom_vaults/?/init.lua;;") + helpers.get_db_utils(strategy, { + "vaults", + }, { + "session" + }, { + "mocksocket" + }) + + local ok, err = helpers.start_kong({ + database = "off", + declarative_config = yaml_file, + vaults = "mocksocket", + plugins = "session", + }) + + assert.truthy(ok) + assert.not_matches("error", err) + assert.logfile().has.no.line("[error]", true, 0) + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.no.line(" {vault://mock/session-secret-again}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + + assert(helpers.restart_kong({ + database = "off", + vaults = "mocksocket", + plugins = "session", + declarative_config = "", + })) + + assert.logfile().has.no.line("[error]", true, 0) + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.no.line(" {vault://mock/session-secret-again}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + + assert(helpers.reload_kong("off", "reload --prefix " .. 
helpers.test_conf.prefix, { + database = "off", + vaults = "mocksocket", + plugins = "session", + declarative_config = "", + })) + + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.no.line(" {vault://mock/session-secret-again}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + end) + + it("dbless does not fail fatally when referencing secrets doesn't work in declarative configuration using vault entities", function() + helpers.clean_logfile() + + local yaml_file = helpers.make_yaml_file [[ + _format_version: "3.0" + _transform: true + plugins: + - name: session + instance_name: session + config: + secret: "{vault://mock/session-secret-unknown-again}" + vaults: + - description: my vault + name: mocksocket + prefix: mock + ]] + + finally(function() + os.remove(yaml_file) + end) + + helpers.setenv("KONG_LUA_PATH_OVERRIDE", "./spec/fixtures/custom_vaults/?.lua;./spec/fixtures/custom_vaults/?/init.lua;;") + helpers.get_db_utils(strategy, { + "vaults", + }, { + "session" + }, { + "mocksocket" + }) + + local ok, err = helpers.start_kong({ + database = "off", + declarative_config = yaml_file, + vaults = "mocksocket", + plugins = "session", + }) + + assert.truthy(ok) + assert.not_matches("error", err) + assert.logfile().has.no.line("[error]", true, 0) + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.line(" {vault://mock/session-secret-unknown-again}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + + assert(helpers.restart_kong({ + database = "off", + vaults = "mocksocket", + plugins = "session", + declarative_config = "", + })) + + assert.logfile().has.no.line("[error]", true, 0) + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.line(" {vault://mock/session-secret-unknown-again}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) + + assert(helpers.reload_kong("off", "reload --prefix " .. helpers.test_conf.prefix, { + database = "off", + vaults = "mocksocket", + plugins = "session", + declarative_config = "", + })) + + assert.logfile().has.no.line("traceback", true, 0) + assert.logfile().has.line(" {vault://mock/session-secret-unknown-again}", true, 0) + assert.logfile().has.no.line("could not find vault", true, 0) + + proxy_client = helpers.proxy_client() + + local res = proxy_client:get("/") + assert.res_status(404, res) + local body = assert.response(res).has.jsonbody() + assert.equal("no Route matched with those values", body.message) end) end end) From 069da055c35d857a62531cbc8c2fba5e643547f6 Mon Sep 17 00:00:00 2001 From: Xumin <100666470+StarlightIbuki@users.noreply.github.com> Date: Tue, 20 Feb 2024 12:53:33 +0000 Subject: [PATCH 52/91] fix(pluginserver): properly restart messagepack-based instances The bug was introduced when refactoring/cherry-picking. 
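The behaviour the fix aims for, roughly: when the pluginserver reports that it no longer knows the cached instance id (for example after the pluginserver process was restarted), the instance should be rebuilt and the event retried, instead of only posting a reset event and failing the request. A simplified sketch of that control flow, not the exact Kong code (`call_handle_event` is an illustrative name):

```
local function handle_event(rpc, plugin_name, conf, phase)
  local instance_id, err = rpc.get_instance_id(plugin_name, conf)
  if not err then
    local _
    _, err = rpc:call_handle_event(instance_id, phase)
  end

  if err then
    if err:lower():find("no plugin instance", nil, true) then
      -- stale instance: rebuild it and retry the event
      rpc.reset_instance(plugin_name, conf)
      return handle_event(rpc, plugin_name, conf, phase)
    end
    kong.log.err(err)
  end
end
```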
Fix #12364 Co-authored-by: Guilherme Salazar --- .../unreleased/kong/plugin_server_restart.yml | 3 +++ kong/runloop/plugin_servers/mp_rpc.lua | 18 +++++++------- kong/runloop/plugin_servers/pb_rpc.lua | 24 ++++++++----------- 3 files changed, 23 insertions(+), 22 deletions(-) create mode 100644 changelog/unreleased/kong/plugin_server_restart.yml diff --git a/changelog/unreleased/kong/plugin_server_restart.yml b/changelog/unreleased/kong/plugin_server_restart.yml new file mode 100644 index 00000000000..ed46b92bb16 --- /dev/null +++ b/changelog/unreleased/kong/plugin_server_restart.yml @@ -0,0 +1,3 @@ +message: "**Plugin Server**: fix an issue where Kong fails to properly restart MessagePack-based pluginservers (used in Python and Javascript plugins, for example)" +type: bugfix +scope: Core diff --git a/kong/runloop/plugin_servers/mp_rpc.lua b/kong/runloop/plugin_servers/mp_rpc.lua index ebd0943b265..118c3694c05 100644 --- a/kong/runloop/plugin_servers/mp_rpc.lua +++ b/kong/runloop/plugin_servers/mp_rpc.lua @@ -1,5 +1,7 @@ local kong_global = require "kong.global" local cjson = require "cjson.safe" +local _ + local msgpack do msgpack = require "MessagePack" local nil_pack = msgpack.packers["nil"] @@ -326,20 +328,20 @@ end function Rpc:handle_event(plugin_name, conf, phase) - local instance_id = self.get_instance_id(plugin_name, conf) - local _, err = bridge_loop(self, instance_id, phase) + local instance_id, err = self.get_instance_id(plugin_name, conf) + if not err then + _, err = bridge_loop(self, instance_id, phase) + end if err then - local ok, err2 = kong.worker_events.post("plugin_server", "reset_instances", - { plugin_name = plugin_name, conf = conf }) - if not ok then - kong.log.err("failed to post plugin_server reset_instances event: ", err2) - end + local err_lowered = err:lower() - if str_find(err:lower(), "no plugin instance") then + if str_find(err_lowered, "no plugin instance") then + self.reset_instance(plugin_name, conf) kong.log.warn(err) return self:handle_event(plugin_name, conf, phase) end + kong.log.err(err) end end diff --git a/kong/runloop/plugin_servers/pb_rpc.lua b/kong/runloop/plugin_servers/pb_rpc.lua index 8aae88de866..b94aca313ec 100644 --- a/kong/runloop/plugin_servers/pb_rpc.lua +++ b/kong/runloop/plugin_servers/pb_rpc.lua @@ -392,8 +392,8 @@ end function Rpc:handle_event(plugin_name, conf, phase) - local instance_id, res, err - instance_id, err = self.get_instance_id(plugin_name, conf) + local instance_id, err = self.get_instance_id(plugin_name, conf) + local res if not err then res, err = self:call("cmd_handle_event", { instance_id = instance_id, @@ -402,20 +402,16 @@ function Rpc:handle_event(plugin_name, conf, phase) end if not res or res == "" then - if err then - local err_lowered = err and err:lower() or "" - - kong.log.err(err_lowered) + local err_lowered = err and err:lower() or "unknown error" - if err_lowered == "not ready" then - self.reset_instance(plugin_name, conf) - end - if str_find(err_lowered, "no plugin instance") - or str_find(err_lowered, "closed") then - self.reset_instance(plugin_name, conf) - return self:handle_event(plugin_name, conf, phase) - end + if str_find(err_lowered, "no plugin instance", nil, true) + or str_find(err_lowered, "closed", nil, true) then + self.reset_instance(plugin_name, conf) + kong.log.warn(err) + return self:handle_event(plugin_name, conf, phase) end + + kong.log.err(err) end end From 8a7eac3def8508177b4def176b3afc1992ced6af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hans=20H=C3=BCbner?= Date: Mon, 22 Jan 2024 
13:52:56 +0100 Subject: [PATCH 53/91] feat(test): add reconfiguration completion detection test plugin Unlike the previous implementation, this one does not require changes to Kong and its proxy path. It works based on the assumption that the order of admin API changes is preserved. The admin API client marks the end of the changes that it needs to see propagated to the data plane(s) by changing the configuration of this plugin, setting a particular configuration version number. On the proxy path, a header X-Kong-Configuration-Version is sent with that version number. The plugin's access handler verifies that the version number configured in the plugin (on the dataplane) matches the version number requested by the client. If the version numbers do not match, a 503 error is generated, which causes the client to retry. The plugin is available only to busted tests. It needs to be enabled when starting Kong. A new busted test helper function make_synchronized_clients is provided that automatically synchronizes a proxy client and an admin API client. The the test can freely mix invocations to either endpoints. Whenever a change is made through the admin API, the proxy path request is delayed until the change has propagated to the data plane. spec/02-integration/13-vaults/06-refresh-secrets_spec.lua has been updated to use the function as an illustration. --- .../13-vaults/06-refresh-secrets_spec.lua | 21 +- .../01-access_spec.lua | 186 ++++++++++++++++++ .../02-helper_spec.lua | 167 ++++++++++++++++ .../reconfiguration-completion/handler.lua | 29 +++ .../reconfiguration-completion/schema.lua | 16 ++ spec/helpers.lua | 115 ++++++++++- 6 files changed, 520 insertions(+), 14 deletions(-) create mode 100644 spec/03-plugins/39-reconfiguration-completion/01-access_spec.lua create mode 100644 spec/03-plugins/39-reconfiguration-completion/02-helper_spec.lua create mode 100644 spec/fixtures/custom_plugins/kong/plugins/reconfiguration-completion/handler.lua create mode 100644 spec/fixtures/custom_plugins/kong/plugins/reconfiguration-completion/schema.lua diff --git a/spec/02-integration/13-vaults/06-refresh-secrets_spec.lua b/spec/02-integration/13-vaults/06-refresh-secrets_spec.lua index c9d15b01cb4..21095e09248 100644 --- a/spec/02-integration/13-vaults/06-refresh-secrets_spec.lua +++ b/spec/02-integration/13-vaults/06-refresh-secrets_spec.lua @@ -34,14 +34,13 @@ for _, strategy in helpers.each_strategy() do database = strategy, prefix = helpers.test_conf.prefix, nginx_conf = "spec/fixtures/custom_nginx.template", - plugins = "dummy", + plugins = "dummy,reconfiguration-completion", vaults = "env", }) end) before_each(function() - admin_client = assert(helpers.admin_client()) - proxy_client = assert(helpers.proxy_client()) + proxy_client, admin_client = helpers.make_synchronized_clients() end) after_each(function() @@ -76,15 +75,13 @@ for _, strategy in helpers.each_strategy() do }) assert.res_status(200, res) - assert - .with_timeout(10) - .eventually(function() - local res = proxy_client:send { - method = "GET", - path = "/", - } - return res and res.status == 200 and res.headers["Dummy-Plugin"] == "MONSTER" and res.headers["X-Test-This"] == "SPIRIT" - end).is_truthy("Could not find header in request") + local res = proxy_client:send { + method = "GET", + path = "/", + } + assert.res_status(200, res) + assert.is_same("MONSTER", res.headers["Dummy-Plugin"]) + assert.is_same("SPIRIT", res.headers["X-Test-This"]) end) end) end diff --git a/spec/03-plugins/39-reconfiguration-completion/01-access_spec.lua 
b/spec/03-plugins/39-reconfiguration-completion/01-access_spec.lua new file mode 100644 index 00000000000..83768ef7ab8 --- /dev/null +++ b/spec/03-plugins/39-reconfiguration-completion/01-access_spec.lua @@ -0,0 +1,186 @@ +local helpers = require "spec.helpers" +local cjson = require "cjson" +local utils = require "kong.tools.utils" + +describe("Reconfiguration completion detection plugin", function() + + local STATE_UPDATE_FREQUENCY = .2 + + local admin_client + local proxy_client + + local function plugin_tests() + + local configuration_version = utils.uuid() + + local res = admin_client:post("/plugins", { + body = { + name = "reconfiguration-completion", + config = { + version = configuration_version, + } + }, + headers = { ["Content-Type"] = "application/json" }, + }) + local body = assert.res_status(201, res) + local plugin = cjson.decode(body) + local reconfiguration_completion_plugin_id = plugin.id + + res = admin_client:post("/plugins", { + body = { + name = "request-termination", + config = { + status_code = 200, + body = "kong terminated the request", + } + }, + headers = { ["Content-Type"] = "application/json" }, + }) + assert.res_status(201, res) + + res = admin_client:post("/services", { + body = { + name = "test-service", + url = "http://127.0.0.1", + }, + headers = { ["Content-Type"] = "application/json" }, + }) + body = assert.res_status(201, res) + local service = cjson.decode(body) + + -- We're running the route setup in `eventually` to cover for the unlikely case that reconfiguration completes + -- between adding the route, updating the plugin and requesting the path through the proxy path. + + local next_path do + local path_suffix = 0 + function next_path() + path_suffix = path_suffix + 1 + return "/" .. tostring(path_suffix) + end + end + + local service_path + + assert.eventually(function() + service_path = next_path() + + res = admin_client:post("/services/" .. service.id .. "/routes", { + body = { + paths = { service_path } + }, + headers = { ["Content-Type"] = "application/json" }, + }) + assert.res_status(201, res) + + configuration_version = utils.uuid() + res = admin_client:patch("/plugins/" .. 
reconfiguration_completion_plugin_id, { + body = { + config = { + version = configuration_version, + } + }, + headers = { ["Content-Type"] = "application/json" }, + }) + assert.res_status(200, res) + + res = proxy_client:get(service_path, + { + headers = { + ["If-Kong-Configuration-Version"] = configuration_version + } + }) + assert.res_status(503, res) + assert.equals("pending", res.headers['x-kong-reconfiguration-status']) + local retry_after = tonumber(res.headers['retry-after']) + ngx.sleep(retry_after) + end) + .with_timeout(10) + .has_no_error() + + assert.eventually(function() + res = proxy_client:get(service_path, + { + headers = { + ["If-Kong-Configuration-Version"] = configuration_version + } + }) + body = assert.res_status(200, res) + assert.equals("kong terminated the request", body) + end) + .has_no_error() + end + + describe("#traditional mode", function() + lazy_setup(function() + helpers.get_db_utils() + assert(helpers.start_kong({ + plugins = "bundled,reconfiguration-completion", + worker_consistency = "eventual", + worker_state_update_frequency = STATE_UPDATE_FREQUENCY, + })) + admin_client = helpers.admin_client() + proxy_client = helpers.proxy_client() + end) + + teardown(function() + if admin_client then + admin_client:close() + end + if proxy_client then + proxy_client:close() + end + helpers.stop_kong() + end) + + it('', plugin_tests) + end) + + describe("#hybrid mode", function() + lazy_setup(function() + helpers.get_db_utils() + + assert(helpers.start_kong({ + plugins = "bundled,reconfiguration-completion", + role = "control_plane", + database = "postgres", + prefix = "cp", + cluster_cert = "spec/fixtures/kong_clustering.crt", + cluster_cert_key = "spec/fixtures/kong_clustering.key", + lua_ssl_trusted_certificate = "spec/fixtures/kong_clustering.crt", + cluster_listen = "127.0.0.1:9005", + cluster_telemetry_listen = "127.0.0.1:9006", + nginx_conf = "spec/fixtures/custom_nginx.template", + db_update_frequency = STATE_UPDATE_FREQUENCY, + })) + + assert(helpers.start_kong({ + plugins = "bundled,reconfiguration-completion", + role = "data_plane", + database = "off", + prefix = "dp", + cluster_cert = "spec/fixtures/kong_clustering.crt", + cluster_cert_key = "spec/fixtures/kong_clustering.key", + lua_ssl_trusted_certificate = "spec/fixtures/kong_clustering.crt", + cluster_control_plane = "127.0.0.1:9005", + cluster_telemetry_endpoint = "127.0.0.1:9006", + proxy_listen = "0.0.0.0:9002", + worker_state_update_frequency = STATE_UPDATE_FREQUENCY, + })) + admin_client = helpers.admin_client() + proxy_client = helpers.proxy_client("127.0.0.1", 9002) + end) + + teardown(function() + if admin_client then + admin_client:close() + end + if proxy_client then + proxy_client:close() + end + helpers.stop_kong("dp") + helpers.stop_kong("cp") + end) + + it('', plugin_tests) + end) +end) diff --git a/spec/03-plugins/39-reconfiguration-completion/02-helper_spec.lua b/spec/03-plugins/39-reconfiguration-completion/02-helper_spec.lua new file mode 100644 index 00000000000..0ecbd6a9be0 --- /dev/null +++ b/spec/03-plugins/39-reconfiguration-completion/02-helper_spec.lua @@ -0,0 +1,167 @@ +local helpers = require "spec.helpers" +local cjson = require "cjson" + +describe("Reconfiguration completion detection helper", function() + + local STATE_UPDATE_FREQUENCY = .2 + + local admin_client + local proxy_client + + local function helper_tests(make_proxy_client) + local res = admin_client:post("/plugins", { + body = { + name = "request-termination", + config = { + status_code = 200, + body = "kong 
terminated the request", + } + }, + headers = { ["Content-Type"] = "application/json" }, + }) + local body = assert.res_status(201, res) + local request_termination_plugin_id = cjson.decode(body).id + + res = admin_client:post("/services", { + body = { + name = "test-service", + url = "http://127.0.0.1", + }, + headers = { ["Content-Type"] = "application/json" }, + }) + body = assert.res_status(201, res) + local service = cjson.decode(body) + + local path = "/foo-barak" + + res = admin_client:post("/services/" .. service.id .. "/routes", { + body = { + paths = { path } + }, + headers = { ["Content-Type"] = "application/json" }, + }) + assert.res_status(201, res) + + res = proxy_client:get(path) + body = assert.res_status(200, res) + assert.equals("kong terminated the request", body) + + res = admin_client:patch("/plugins/" .. request_termination_plugin_id, { + body = { + config = { + status_code = 404, + body = "kong terminated the request with 404", + } + }, + headers = { ["Content-Type"] = "application/json" }, + }) + assert.res_status(200, res) + + res = proxy_client:get(path) + body = assert.res_status(404, res) + assert.equals("kong terminated the request with 404", body) + + local second_admin_client = helpers.admin_client() + admin_client:synchronize_sibling(second_admin_client) + + res = second_admin_client:patch("/plugins/" .. request_termination_plugin_id, { + body = { + config = { + status_code = 405, + body = "kong terminated the request with 405", + } + }, + headers = { ["Content-Type"] = "application/json" }, + }) + assert.res_status(200, res) + + local second_proxy_client = make_proxy_client() + proxy_client:synchronize_sibling(second_proxy_client) + + res = second_proxy_client:get(path) + body = assert.res_status(405, res) + assert.equals("kong terminated the request with 405", body) + end + + describe("#traditional mode", function() + + local function make_proxy_client() + return helpers.proxy_client() + end + + lazy_setup(function() + helpers.get_db_utils() + assert(helpers.start_kong({ + plugins = "bundled,reconfiguration-completion", + worker_consistency = "eventual", + worker_state_update_frequency = STATE_UPDATE_FREQUENCY, + })) + proxy_client, admin_client = helpers.make_synchronized_clients() + end) + + teardown(function() + if admin_client then + admin_client:close() + end + if proxy_client then + proxy_client:close() + end + helpers.stop_kong() + end) + + it('', function () helper_tests(make_proxy_client) end) + end) + + describe("#hybrid mode", function() + + local function make_proxy_client() + return helpers.proxy_client("127.0.0.1", 9002) + end + + lazy_setup(function() + helpers.get_db_utils() + + assert(helpers.start_kong({ + plugins = "bundled,reconfiguration-completion", + role = "control_plane", + database = "postgres", + prefix = "cp", + cluster_cert = "spec/fixtures/kong_clustering.crt", + cluster_cert_key = "spec/fixtures/kong_clustering.key", + lua_ssl_trusted_certificate = "spec/fixtures/kong_clustering.crt", + cluster_listen = "127.0.0.1:9005", + cluster_telemetry_listen = "127.0.0.1:9006", + nginx_conf = "spec/fixtures/custom_nginx.template", + db_update_frequency = STATE_UPDATE_FREQUENCY, + })) + + assert(helpers.start_kong({ + plugins = "bundled,reconfiguration-completion", + role = "data_plane", + database = "off", + prefix = "dp", + cluster_cert = "spec/fixtures/kong_clustering.crt", + cluster_cert_key = "spec/fixtures/kong_clustering.key", + lua_ssl_trusted_certificate = "spec/fixtures/kong_clustering.crt", + cluster_control_plane = 
"127.0.0.1:9005", + cluster_telemetry_endpoint = "127.0.0.1:9006", + proxy_listen = "0.0.0.0:9002", + worker_state_update_frequency = STATE_UPDATE_FREQUENCY, + })) + proxy_client, admin_client = helpers.make_synchronized_clients({ proxy_client = make_proxy_client() }) + end) + + teardown(function() + if admin_client then + admin_client:close() + end + if proxy_client then + proxy_client:close() + end + helpers.stop_kong("dp") + helpers.stop_kong("cp") + end) + + it('', function () helper_tests(make_proxy_client) end) + end) +end) diff --git a/spec/fixtures/custom_plugins/kong/plugins/reconfiguration-completion/handler.lua b/spec/fixtures/custom_plugins/kong/plugins/reconfiguration-completion/handler.lua new file mode 100644 index 00000000000..8afb7f5ab0d --- /dev/null +++ b/spec/fixtures/custom_plugins/kong/plugins/reconfiguration-completion/handler.lua @@ -0,0 +1,29 @@ +local kong_meta = require "kong.meta" + +local ReconfigurationCompletionHandler = { + VERSION = kong_meta.version, + PRIORITY = 2000000, +} + + +function ReconfigurationCompletionHandler:rewrite(conf) + local status = "unknown" + local if_kong_configuration_version = kong.request and kong.request.get_header('if-kong-configuration-version') + if if_kong_configuration_version then + if if_kong_configuration_version ~= conf.version then + return kong.response.error( + 503, + "Service Unavailable", + { + ["X-Kong-Reconfiguration-Status"] = "pending", + ["Retry-After"] = tostring((kong.configuration.worker_state_update_frequency or 1) + 1), + } + ) + else + status = "complete" + end + end + kong.response.set_header("X-Kong-Reconfiguration-Status", status) +end + +return ReconfigurationCompletionHandler diff --git a/spec/fixtures/custom_plugins/kong/plugins/reconfiguration-completion/schema.lua b/spec/fixtures/custom_plugins/kong/plugins/reconfiguration-completion/schema.lua new file mode 100644 index 00000000000..3a7f8512233 --- /dev/null +++ b/spec/fixtures/custom_plugins/kong/plugins/reconfiguration-completion/schema.lua @@ -0,0 +1,16 @@ +local typedefs = require "kong.db.schema.typedefs" + +return { + name = "reconfiguration-completion", + fields = { + { protocols = typedefs.protocols }, + { config = { + type = "record", + fields = { + { version = { description = "Client-assigned version number for the current Kong Gateway configuration", + type = "string", + required = true, } }, + }, + }, }, + } +} diff --git a/spec/helpers.lua b/spec/helpers.lua index a86ca9a1061..cea72bad2b7 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -76,6 +76,7 @@ local https_server = require "spec.fixtures.https_server" local stress_generator = require "spec.fixtures.stress_generator" local resty_signal = require "resty.signal" local lfs = require "lfs" +local luassert = require "luassert.assert" ffi.cdef [[ int setenv(const char *name, const char *value, int overwrite); @@ -1254,6 +1255,116 @@ local function proxy_client_grpcs(host, port) end +--- +-- Reconfiguration completion detection helpers +-- + +local MAX_RETRY_TIME = 10 + +--- Set up admin client and proxy client to so that interactions with the proxy client +-- wait for preceding admin API client changes to have completed. 
+ +-- @function make_synchronized_clients +-- @param clients table with admin_client and proxy_client fields (both optional) +-- @return admin_client, proxy_client + +local function make_synchronized_clients(clients) + clients = clients or {} + local synchronized_proxy_client = clients.proxy_client or proxy_client() + local synchronized_admin_client = clients.admin_client or admin_client() + + -- Install the reconfiguration completion detection plugin + local res = synchronized_admin_client:post("/plugins", { + headers = { ["Content-Type"] = "application/json" }, + body = { + name = "reconfiguration-completion", + config = { + version = "0", + } + }, + }) + local body = luassert.res_status(201, res) + local plugin = cjson.decode(body) + local plugin_id = plugin.id + + -- Wait until the plugin is active on the proxy path, indicated by the presence of the X-Kong-Reconfiguration-Status header + luassert.eventually(function() + res = synchronized_proxy_client:get("/non-existent-proxy-path") + luassert.res_status(404, res) + luassert.equals("unknown", res.headers['x-kong-reconfiguration-status']) + end) + .has_no_error() + + -- Save the original request functions for the admin and proxy client + local proxy_request = synchronized_proxy_client.request + local admin_request = synchronized_admin_client.request + + local current_version = 0 -- incremented whenever a configuration change is made through the admin API + local last_configured_version = 0 -- current version of the reconfiguration-completion plugin's configuration + + -- Wrap the admin API client request + function synchronized_admin_client.request(client, opts) + -- Whenever the configuration is changed through the admin API, increment the current version number + if opts.method == "POST" or opts.method == "PUT" or opts.method == "PATCH" or opts.method == "DELETE" then + current_version = current_version + 1 + end + return admin_request(client, opts) + end + + function synchronized_admin_client.synchronize_sibling(self, sibling) + sibling.request = self.request + end + + -- Wrap the proxy client request + function synchronized_proxy_client.request(client, opts) + -- If the configuration has been changed through the admin API, update the version number in the + -- reconfiguration-completion plugin. + if current_version > last_configured_version then + last_configured_version = current_version + res = admin_request(synchronized_admin_client, { + method = "PATCH", + path = "/plugins/" .. plugin_id, + headers = { ["Content-Type"] = "application/json" }, + body = cjson.encode({ + config = { + version = tostring(current_version), + } + }), + }) + luassert.res_status(200, res) + end + + -- Retry the request until the reconfiguration is complete and the reconfiguration completion + -- plugin on the database has been updated to the current version. + if not opts.headers then + opts.headers = {} + end + opts.headers["If-Kong-Configuration-Version"] = tostring(current_version) + local retry_until = ngx.now() + MAX_RETRY_TIME + local err + :: retry :: + res, err = proxy_request(client, opts) + if err then + return res, err + end + if res.headers['x-kong-reconfiguration-status'] ~= "complete" then + res:read_body() + ngx.sleep(res.headers['retry-after'] or 1) + if ngx.now() < retry_until then + goto retry + end + return nil, "reconfiguration did not occur within " .. MAX_RETRY_TIME .. 
" seconds" + end + return res, err + end + + function synchronized_proxy_client.synchronize_sibling(self, sibling) + sibling.request = self.request + end + + return synchronized_proxy_client, synchronized_admin_client +end + --- -- TCP/UDP server helpers -- @@ -1652,7 +1763,6 @@ end -- @section assertions local say = require "say" -local luassert = require "luassert.assert" require("spec.helpers.wait") --- Waits until a specific condition is met. @@ -3856,7 +3966,7 @@ do -- in above case, the id is 303. local msg_id = -1 local prefix_dir = "servroot" - + --- Check if echo server is ready. -- -- @function is_echo_server_ready @@ -4158,6 +4268,7 @@ end http_client = http_client, grpc_client = grpc_client, http2_client = http2_client, + make_synchronized_clients = make_synchronized_clients, wait_until = wait_until, pwait_until = pwait_until, wait_pid = wait_pid, From f80b7d59e4e27f280283838b099d038b59f7af0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Wed, 21 Feb 2024 17:47:09 +0100 Subject: [PATCH 54/91] chore(tests): remove redis code duplication in specs In tests there was a lot of code duplication related to redis connection, user adding, removing and db flushing. This commits extracts all of this code to redis_helper KAG-2130 --- .../01-helpers/04-redis_helper_spec.lua | 60 +++++++++++++++++++ .../23-rate-limiting/04-access_spec.lua | 48 ++------------- .../23-rate-limiting/05-integration_spec.lua | 38 +++--------- .../04-access_spec.lua | 31 +--------- .../05-integration_spec.lua | 39 +++--------- spec/helpers/redis_helper.lua | 40 +++++++++++++ 6 files changed, 125 insertions(+), 131 deletions(-) create mode 100644 spec/02-integration/01-helpers/04-redis_helper_spec.lua create mode 100644 spec/helpers/redis_helper.lua diff --git a/spec/02-integration/01-helpers/04-redis_helper_spec.lua b/spec/02-integration/01-helpers/04-redis_helper_spec.lua new file mode 100644 index 00000000000..6081309d443 --- /dev/null +++ b/spec/02-integration/01-helpers/04-redis_helper_spec.lua @@ -0,0 +1,60 @@ +local redis_helper = require "spec.helpers.redis_helper" +local helpers = require "spec.helpers" + +local REDIS_HOST = helpers.redis_host +local REDIS_PORT = helpers.redis_port +local REDIS_DATABASE1 = 1 +local REDIS_DATABASE2 = 2 + +describe("redis_helper tests", function() + describe("connect", function () + describe("when connection info is correct", function() + it("connects to redis", function() + local red, version = redis_helper.connect(REDIS_HOST, REDIS_PORT) + assert.is_truthy(version) + assert.is_not_nil(red) + end) + end) + + describe("when connection info is invalid", function () + it("does not connect to redis", function() + assert.has_error(function() + redis_helper.connect(REDIS_HOST, 5123) + end) + end) + end) + end) + + describe("reset_redis", function () + it("clears redis database", function() + -- given - redis with some values in 2 databases + local red = redis_helper.connect(REDIS_HOST, REDIS_PORT) + red:select(REDIS_DATABASE1) + red:set("dog", "an animal") + local ok, err = red:get("dog") + assert.falsy(err) + assert.same("an animal", ok) + + red:select(REDIS_DATABASE2) + red:set("cat", "also animal") + local ok, err = red:get("cat") + assert.falsy(err) + assert.same("also animal", ok) + + -- when - resetting redis + redis_helper.reset_redis(REDIS_HOST, REDIS_PORT) + + -- then - clears everything + red:select(REDIS_DATABASE1) + local ok, err = red:get("dog") + assert.falsy(err) + assert.same(ngx.null, ok) + + red:select(REDIS_DATABASE2) + local ok, err = 
red:get("cat") + assert.falsy(err) + assert.same(ngx.null, ok) + end) + end) +end) + diff --git a/spec/03-plugins/23-rate-limiting/04-access_spec.lua b/spec/03-plugins/23-rate-limiting/04-access_spec.lua index ba128c616ee..140dcf0e0ac 100644 --- a/spec/03-plugins/23-rate-limiting/04-access_spec.lua +++ b/spec/03-plugins/23-rate-limiting/04-access_spec.lua @@ -1,7 +1,6 @@ local helpers = require "spec.helpers" local cjson = require "cjson" -local redis = require "resty.redis" -local version = require "version" +local redis_helper = require "spec.helpers.redis_helper" local REDIS_HOST = helpers.redis_host @@ -56,41 +55,6 @@ local function GET(url, opt) end -local function redis_connect() - local red = assert(redis:new()) - red:set_timeout(2000) - assert(red:connect(REDIS_HOST, REDIS_PORT)) - local red_version = string.match(red:info(), 'redis_version:([%g]+)\r\n') - return red, assert(version(red_version)) -end - - -local function flush_redis() - local redis = require "resty.redis" - local red = assert(redis:new()) - red:set_timeout(2000) - local ok, err = red:connect(REDIS_HOST, REDIS_PORT) - if not ok then - error("failed to connect to Redis: " .. err) - end - - if REDIS_PASSWORD and REDIS_PASSWORD ~= "" then - local ok, err = red:auth(REDIS_PASSWORD) - if not ok then - error("failed to connect to Redis: " .. err) - end - end - - local ok, err = red:select(REDIS_DATABASE) - if not ok then - error("failed to change Redis database: " .. err) - end - - red:flushall() - red:close() -end - - local function client_requests(n, proxy_fn) local ret = { minute_limit = {}, @@ -419,7 +383,7 @@ describe(desc, function() _, db = helpers.get_db_utils(strategy, nil, { "rate-limiting", "key-auth" }) if policy == "redis" then - flush_redis() + redis_helper.reset_redis(REDIS_HOST, REDIS_PORT) elseif policy == "cluster" then db:truncate("ratelimiting_metrics") @@ -452,7 +416,7 @@ describe(desc, function() end if policy == "redis" then - flush_redis() + redis_helper.reset_redis(REDIS_HOST, REDIS_PORT) end end) @@ -1086,7 +1050,7 @@ describe(desc, function () _, db = helpers.get_db_utils(strategy, nil, { "rate-limiting", "key-auth" }) if policy == "redis" then - flush_redis() + redis_helper.reset_redis(REDIS_HOST, REDIS_PORT) elseif policy == "cluster" then db:truncate("ratelimiting_metrics") @@ -1293,7 +1257,7 @@ describe(desc, function () end) before_each(function() - flush_redis() + redis_helper.reset_redis(REDIS_HOST, REDIS_PORT) admin_client = helpers.admin_client() end) @@ -1319,7 +1283,7 @@ describe(desc, function () }, sync_rate = 10, }, service) - local red = redis_connect() + local red = redis_helper.connect(REDIS_HOST, REDIS_PORT) local ok, err = red:select(REDIS_DATABASE) if not ok then error("failed to change Redis database: " .. 
err) diff --git a/spec/03-plugins/23-rate-limiting/05-integration_spec.lua b/spec/03-plugins/23-rate-limiting/05-integration_spec.lua index 207cbb09918..1ec13be7900 100644 --- a/spec/03-plugins/23-rate-limiting/05-integration_spec.lua +++ b/spec/03-plugins/23-rate-limiting/05-integration_spec.lua @@ -1,7 +1,7 @@ local helpers = require "spec.helpers" -local redis = require "resty.redis" local version = require "version" local cjson = require "cjson" +local redis_helper = require "spec.helpers.redis_helper" local REDIS_HOST = helpers.redis_host @@ -19,29 +19,6 @@ local REDIS_PASSWORD = "secret" local SLEEP_TIME = 1 -local function redis_connect() - local red = redis:new() - red:set_timeout(2000) - assert(red:connect(REDIS_HOST, REDIS_PORT)) - local red_version = string.match(red:info(), 'redis_version:([%g]+)\r\n') - return red, assert(version(red_version)) -end - -local function flush_redis(red, db) - assert(red:select(db)) - red:flushall() -end - -local function add_redis_user(red) - assert(red:acl("setuser", REDIS_USER_VALID, "on", "allkeys", "allcommands", ">" .. REDIS_PASSWORD)) - assert(red:acl("setuser", REDIS_USER_INVALID, "on", "allkeys", "+get", ">" .. REDIS_PASSWORD)) -end - -local function remove_redis_user(red) - assert(red:acl("deluser", REDIS_USER_VALID)) - assert(red:acl("deluser", REDIS_USER_INVALID)) -end - describe("Plugin: rate-limiting (integration)", function() local client local bp @@ -56,7 +33,7 @@ describe("Plugin: rate-limiting (integration)", function() }, { "rate-limiting" }) - red, red_version = redis_connect() + red, red_version = redis_helper.connect(REDIS_HOST, REDIS_PORT) end) lazy_teardown(function() @@ -98,11 +75,11 @@ describe("Plugin: rate-limiting (integration)", function() -- https://github.com/Kong/kong/issues/3292 lazy_setup(function() - flush_redis(red, REDIS_DB_1) - flush_redis(red, REDIS_DB_2) - flush_redis(red, REDIS_DB_3) + red:flushall() + if red_version >= version("6.0.0") then - add_redis_user(red) + redis_helper.add_admin_user(red, REDIS_USER_VALID, REDIS_PASSWORD) + redis_helper.add_basic_user(red, REDIS_USER_INVALID, REDIS_PASSWORD) end bp = helpers.get_db_utils(nil, { @@ -219,7 +196,8 @@ describe("Plugin: rate-limiting (integration)", function() lazy_teardown(function() helpers.stop_kong() if red_version >= version("6.0.0") then - remove_redis_user(red) + redis_helper.remove_user(red, REDIS_USER_VALID) + redis_helper.remove_user(red, REDIS_USER_INVALID) end end) diff --git a/spec/03-plugins/24-response-rate-limiting/04-access_spec.lua b/spec/03-plugins/24-response-rate-limiting/04-access_spec.lua index 4fb9ecb5d0f..c7def76fe69 100644 --- a/spec/03-plugins/24-response-rate-limiting/04-access_spec.lua +++ b/spec/03-plugins/24-response-rate-limiting/04-access_spec.lua @@ -1,6 +1,6 @@ local cjson = require "cjson" local helpers = require "spec.helpers" - +local redis_helper = require "spec.helpers.redis_helper" local REDIS_HOST = helpers.redis_host local REDIS_PORT = helpers.redis_port @@ -25,33 +25,6 @@ local function wait() ngx.sleep(1 - millis) end - -local function flush_redis() - local redis = require "resty.redis" - local red = redis:new() - red:set_timeout(2000) - local ok, err = red:connect(REDIS_HOST, REDIS_PORT) - if not ok then - error("failed to connect to Redis: " .. err) - end - - if REDIS_PASSWORD and REDIS_PASSWORD ~= "" then - local ok, err = red:auth(REDIS_PASSWORD) - if not ok then - error("failed to connect to Redis: " .. 
err) - end - end - - local ok, err = red:select(REDIS_DATABASE) - if not ok then - error("failed to change Redis database: " .. err) - end - - red:flushall() - red:close() -end - - local redis_confs = { no_ssl = { redis_port = REDIS_PORT, @@ -102,7 +75,7 @@ local function init_db(strategy, policy) }) if policy == "redis" then - flush_redis() + redis_helper.reset_redis(REDIS_HOST, REDIS_PORT) end return bp diff --git a/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua b/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua index bd0544d33e4..d4e3cef0d0b 100644 --- a/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua +++ b/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua @@ -1,7 +1,8 @@ local helpers = require "spec.helpers" -local redis = require "resty.redis" local version = require "version" local cjson = require "cjson" +local redis_helper = require "spec.helpers.redis_helper" + local tostring = tostring @@ -21,28 +22,6 @@ local REDIS_PASSWORD = "secret" local SLEEP_TIME = 1 -local function redis_connect() - local red = redis:new() - red:set_timeout(2000) - assert(red:connect(REDIS_HOST, REDIS_PORT)) - local red_version = string.match(red:info(), 'redis_version:([%g]+)\r\n') - return red, assert(version(red_version)) -end - -local function flush_redis(red, db) - assert(red:select(db)) - red:flushall() -end - -local function add_redis_user(red) - assert(red:acl("setuser", REDIS_USER_VALID, "on", "allkeys", "+incrby", "+select", "+info", "+expire", "+get", "+exists", ">" .. REDIS_PASSWORD)) - assert(red:acl("setuser", REDIS_USER_INVALID, "on", "allkeys", "+get", ">" .. REDIS_PASSWORD)) -end - -local function remove_redis_user(red) - assert(red:acl("deluser", REDIS_USER_VALID)) - assert(red:acl("deluser", REDIS_USER_INVALID)) -end describe("Plugin: rate-limiting (integration)", function() local client @@ -59,8 +38,7 @@ describe("Plugin: rate-limiting (integration)", function() }, { "response-ratelimiting", }) - red, red_version = redis_connect() - + red, red_version = redis_helper.connect(REDIS_HOST, REDIS_PORT) end) lazy_teardown(function() @@ -100,11 +78,11 @@ describe("Plugin: rate-limiting (integration)", function() -- https://github.com/Kong/kong/issues/3292 lazy_setup(function() - flush_redis(red, REDIS_DB_1) - flush_redis(red, REDIS_DB_2) - flush_redis(red, REDIS_DB_3) + red:flushall() + if red_version >= version("6.0.0") then - add_redis_user(red) + redis_helper.add_admin_user(red, REDIS_USER_VALID, REDIS_PASSWORD) + redis_helper.add_basic_user(red, REDIS_USER_INVALID, REDIS_PASSWORD) end bp = helpers.get_db_utils(nil, { @@ -219,7 +197,8 @@ describe("Plugin: rate-limiting (integration)", function() lazy_teardown(function() helpers.stop_kong() if red_version >= version("6.0.0") then - remove_redis_user(red) + redis_helper.remove_user(red, REDIS_USER_VALID) + redis_helper.remove_user(red, REDIS_USER_INVALID) end end) diff --git a/spec/helpers/redis_helper.lua b/spec/helpers/redis_helper.lua new file mode 100644 index 00000000000..37d03545fa1 --- /dev/null +++ b/spec/helpers/redis_helper.lua @@ -0,0 +1,40 @@ +local redis = require "resty.redis" +local version = require "version" + +local DEFAULT_TIMEOUT = 2000 + + +local function connect(host, port) + local redis_client = redis:new() + redis_client:set_timeout(DEFAULT_TIMEOUT) + assert(redis_client:connect(host, port)) + local red_version = string.match(redis_client:info(), 'redis_version:([%g]+)\r\n') + return redis_client, assert(version(red_version)) +end + +local function 
reset_redis(host, port) + local redis_client = connect(host, port) + redis_client:flushall() + redis_client:close() +end + +local function add_admin_user(redis_client, username, password) + assert(redis_client:acl("setuser", username, "on", "allkeys", "allcommands", ">" .. password)) +end + +local function add_basic_user(redis_client, username, password) + assert(redis_client:acl("setuser", username, "on", "allkeys", "+get", ">" .. password)) +end + +local function remove_user(redis_client, username) + assert(redis_client:acl("deluser", username)) +end + + +return { + connect = connect, + add_admin_user = add_admin_user, + add_basic_user = add_basic_user, + remove_user = remove_user, + reset_redis = reset_redis, +} From 31926752d792f92718f04b8f424ef2b7f6dd8080 Mon Sep 17 00:00:00 2001 From: Qi Date: Thu, 22 Feb 2024 23:03:23 +0800 Subject: [PATCH 55/91] fix(timer-ng): decrease the minimum/maximum threads Too high concurrency setting might make Kong throws error at the runtime. --- .../unreleased/kong/decrease-cocurrency-limit-of-timer-ng.yml | 3 +++ kong/globalpatches.lua | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog/unreleased/kong/decrease-cocurrency-limit-of-timer-ng.yml diff --git a/changelog/unreleased/kong/decrease-cocurrency-limit-of-timer-ng.yml b/changelog/unreleased/kong/decrease-cocurrency-limit-of-timer-ng.yml new file mode 100644 index 00000000000..4e62daeb58d --- /dev/null +++ b/changelog/unreleased/kong/decrease-cocurrency-limit-of-timer-ng.yml @@ -0,0 +1,3 @@ +message: | + Fix a bug where the ulimit setting (open files) is low Kong will fail to start as the lua-resty-timer-ng exhausts the available worker_connections. Decrease the concurrency range of the lua-resty-timer-ng library from [512, 2048] to [256, 1024] to fix this bug. +type: bugfix diff --git a/kong/globalpatches.lua b/kong/globalpatches.lua index 014183d5839..33b6c9ee01c 100644 --- a/kong/globalpatches.lua +++ b/kong/globalpatches.lua @@ -99,8 +99,8 @@ return function(options) else _timerng = require("resty.timerng").new({ - min_threads = 512, - max_threads = 2048, + min_threads = 256, + max_threads = 1024, }) end From a759f5adbc1b4776525d8cd2e18e5cb7f234c73d Mon Sep 17 00:00:00 2001 From: Niklaus Schen <8458369+Water-Melon@users.noreply.github.com> Date: Fri, 23 Feb 2024 18:00:56 +0800 Subject: [PATCH 56/91] docs(changelog): update changelog for gRPC TLS seclevel change KAG-3295 --- changelog/unreleased/kong/set_grpc_tls_seclevel.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 changelog/unreleased/kong/set_grpc_tls_seclevel.yml diff --git a/changelog/unreleased/kong/set_grpc_tls_seclevel.yml b/changelog/unreleased/kong/set_grpc_tls_seclevel.yml new file mode 100644 index 00000000000..02d068713e9 --- /dev/null +++ b/changelog/unreleased/kong/set_grpc_tls_seclevel.yml @@ -0,0 +1,3 @@ +message: Set security level of gRPC's TLS to 0 when ssl_cipher_suite is set to old +type: bugfix +scope: Configuration From f135c7042e5f177d7de6f10ff5c03d52636ccf1b Mon Sep 17 00:00:00 2001 From: Zachary Hu <6426329+outsinre@users.noreply.github.com> Date: Fri, 23 Feb 2024 18:02:24 +0800 Subject: [PATCH 57/91] docs(*): CE changelog automation and verification (#12610) Please check the contained README.md. 
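For context, the tooling added below consumes per-change YAML entries kept under `changelog/unreleased/kong/` (and `changelog/unreleased/kong-manager/`). A minimal sketch of such an entry, modelled on the entries added elsewhere in this series — the file name here is purely illustrative; `message`, `type` and `scope` are the fields those entries use:

```yaml
# changelog/unreleased/kong/example-entry.yml  (hypothetical file name)
message: Short, user-facing description of the change
type: bugfix        # e.g. bugfix, dependency
scope: Core         # optional in some entries, e.g. Core, Admin API, Configuration
```

The `generate` target collects these files into `$(VERSION).md`, and `push_changelog` then moves them under the released version directory.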
--- changelog/Makefile | 114 +++++++++++ changelog/README.md | 137 +++++++++++++ changelog/create_pr | 25 +++ changelog/verify-prs | 464 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 740 insertions(+) create mode 100644 changelog/Makefile create mode 100644 changelog/README.md create mode 100644 changelog/create_pr create mode 100755 changelog/verify-prs diff --git a/changelog/Makefile b/changelog/Makefile new file mode 100644 index 00000000000..9f88b59c9cb --- /dev/null +++ b/changelog/Makefile @@ -0,0 +1,114 @@ +# SHELL := $(shell which bash) +# $(info Use shell $(SHELL)) + +OWNER_REPO := Kong/kong +BASE_BRANCH ?= release/3.6.x +VERSION ?= 3.6.0 +DEBUG ?= false +UNRELEASED_DIR ?= unreleased + +BRANCH_NAME := generate-$(VERSION)-changelog +ORIGIN_BRANCH := origin/$(BASE_BRANCH) + +.PHONY: all check_tools check_version create_branch generate push_changelog create_pr + +all: check_tools check_version create_branch generate push_changelog create_pr +no_pr: check_tools check_version create_branch generate push_changelog + +REQUIRED_TOOLS := git changelog curl jq +check_tools: + $(foreach cmd,$(REQUIRED_TOOLS), \ + $(if $(shell command -v $(cmd) 2>/dev/null), $(info $(cmd) found), \ + $(error command '$(cmd)' command not found) \ + ) \ + ) +ifndef GITHUB_TOKEN + $(error environment variable GITHUB_TOKEN not found) +else + $(info GITHUB_TOKEN found) +endif + +BINARY_VERSION := $(shell changelog -v | awk '{print $$3}') +BAD_VERSION := 0.0.1 +REQUIRED_VERSION := 0.0.2 +check_version: + @if [ $(BINARY_VERSION) = $(BAD_VERSION) ] ; then \ + echo "changelog version is $(BINARY_VERSION). Upgrade to $(REQUIRED_VERSION) at least." ; \ + false ; \ + else \ + echo "all required tools satisfied" ; \ + fi + +create_branch: + @git fetch + @git submodule update --init --recursive + @git checkout -B $(BRANCH_NAME) $(ORIGIN_BRANCH) + +generate: + @rm -f $(VERSION).md + @touch $(VERSION).md + + @if [ -d "$(UNRELEASED_DIR)/kong" ]; then \ + if [ -f "$(VERSION)/$(VERSION).md" ]; then \ + changelog --debug=$(DEBUG) generate \ + --repo-path . \ + --changelog-paths $(VERSION)/kong,$(UNRELEASED_DIR)/kong \ + --title Kong \ + --github-issue-repo Kong/kong \ + --github-api-repo $(OWNER_REPO) \ + --with-jiras \ + >> $(VERSION).md; \ + else \ + changelog --debug=$(DEBUG) generate \ + --repo-path . \ + --changelog-paths $(UNRELEASED_DIR)/kong \ + --title Kong \ + --github-issue-repo Kong/kong \ + --github-api-repo $(OWNER_REPO) \ + --with-jiras \ + >> $(VERSION).md; \ + fi \ + fi + @if [ -d "$(UNRELEASED_DIR)/kong-manager" ]; then \ + if [ -f "$(VERSION)/$(VERSION).md" ]; then \ + changelog --debug=$(DEBUG) generate \ + --repo-path . \ + --changelog-paths $(VERSION)/kong-manager,$(UNRELEASED_DIR)/kong-manager \ + --title Kong-Manager \ + --github-issue-repo Kong/kong-manager \ + --github-api-repo $(OWNER_REPO) \ + --with-jiras \ + >> $(VERSION).md; \ + else \ + changelog --debug=$(DEBUG) generate \ + --repo-path . \ + --changelog-paths $(UNRELEASED_DIR)/kong-manager \ + --title Kong-Manager \ + --github-issue-repo Kong/kong-manager \ + --github-api-repo $(OWNER_REPO) \ + --with-jiras \ + >> $(VERSION).md; \ + fi \ + fi + + @echo + @echo "Please inspect $(VERSION).md" + +push_changelog: + @mkdir -p $(VERSION) + @mv -f $(VERSION).md $(VERSION)/ + @for i in kong kong-manager ; do \ + mkdir -p $(UNRELEASED_DIR)/$$i ; \ + mkdir -p $(VERSION)/$$i ; \ + git mv -k $(UNRELEASED_DIR)/$$i/*.yml $(VERSION)/$$i/ ; \ + touch $(UNRELEASED_DIR)/$$i/.gitkeep ; \ + done + @git add . 
+ @git commit -m "docs(release): genereate $(VERSION) changelog" + @git push -fu origin HEAD + + @echo + @echo "Successfully updated $(BRANCH_NAME) to GitHub." + +create_pr: + @bash create_pr $(OWNER_REPO) $(BASE_BRANCH) $(VERSION) $(BRANCH_NAME) diff --git a/changelog/README.md b/changelog/README.md new file mode 100644 index 00000000000..5a9aacc2f6d --- /dev/null +++ b/changelog/README.md @@ -0,0 +1,137 @@ +# Setup + +Download binary `changelog 0.0.2` from [Kong/gateway-changelog](https://github.com/Kong/gateway-changelog/releases), +or [release-helper](https://github.com/outsinre/release-helper/blob/main/changelog), +and add it to environment variable `PATH`. + +```bash +~ $ PATH="/path/to/changelog:$PATH" + +~ $ changelog +changelog version 0.0.2 +``` + +Ensure `GITHUB_TOKEN` is set in your environment. + +```bash +~ $ echo $GITHUB_TOKEN +``` + +# Create changelog PR + +The command will create a new changelog PR or update an existing one. +Please repeat the command if functional PRs with changelog are merged +after the creation or merge of the changelog PR. + +The command depends on tools like `curl`, `jq`, etc., and will refuse to + create or update changelog PR if any of the tools is not satisfied. + +```bash +~ $ pwd +/Users/zachary/workspace/kong/changelog + +~ $ make BASE_BRANCH="release/3.6.x" VERSION="3.6.0" +``` + +The arguments are clarified as below. + +1. `BASE_BRANCH`: the origin branch that the changelog PR is created from. It + is also the merge base. + + The local repo does not have to be on the base branch. +2. `VERSION`: the release version number we are creating the changelog PR for. + + It can be arbitrary strings as long as you know what you are doing (e.g. for + test purpose) +3. `DEBUG`: shows debug output. Default to `false`. + +# Verify Development PRs + +Given two arbitrary revisions, list commits, PRs, PRs without changelog +and PRs without CE2EE. + +If a CE PR has neither the 'cherry-pick kong-ee' label nor +has cross-referenced EE PRs with 'cherry' in the title, +it is HIGHLY PROBABLY not synced to EE. This is only experimental +as developers may not follow the CE2EE guideline. +However, it is a quick shortcut for us to validate the majority of CE PRs. + +Show the usage. + +```bash +~ $ pwd +/Users/zachary/workspace/kong + +~ $ changelog/verify-prs -h +Version: 0.1 + Author: Zachary Hu (zhucac AT outlook.com) + Script: Compare between two revisions (e.g. tags and branches), and output + commits, PRs, PRs without changelog and CE PRs without CE2EE (experimental). + + A PR should have an associated YML file under 'changelog/unreleased', otherwise + it is printed for verification. + + Regarding CE2EE, if a CE PR has any cross-referenced EE PRs, it is regarded synced + to EE. If strict mode is enabled, associated EE PRs must contain keyword 'cherry' + in the title. If a CE PR is labelled with 'cherry-pick kong-ee', it is regarded synced + to EE. If a CE PR is not synced to EE, it is printed for verification. + + Usage: changelog/verify-prs -h + + -v, --verbose Print debug info. + + --strict-filter When checking if a CE PR is synced to EE, + more strict filters are applied. + + --safe-mode When checking if a CE PR is synced to EE, + check one by one. This overrides '--bulk'. + + --bulk N Number of jobs ran concurrency. Default is '5'. + Adjust this value to your CPU cores. 
+ +Example: + changelog/verify-prs --org-repo kong/kong --base-commit 3.4.2 --head-commit 3.4.3 [--strict-filter] [--bulk 5] [--safe-mode] [-v] + + ORG_REPO=kong/kong BASE_COMMIT=3.4.2 HEAD_COMMIT=3.4.3 changelog/verify-prs +``` + +Run the script. Both `--base-commit` and `--head-commit` can be set to branch names. + +```bash +~ $ pwd +/Users/zachary/workspace/kong + +~ $ changelog/verify-prs --org-repo kong/kong --base-commit 3.4.0 --head-commit 3.5.0 +Org Repo: kong/kong +Base Commit: 3.4.0 +Head Commit: 3.5.0 + +comparing between '3.4.0' and '3.5.0' +number of commits: 280 +number of pages: 6 +commits per page: 50 + +PRs: +https://github.com/Kong/kong/pull/7414 +... + +PRs without changelog: +https://github.com/Kong/kong/pull/7413 +... + +PRs without 'cherry-pick kong-ee' label: +https://github.com/Kong/kong/pull/11721 +... + +PRs without cross-referenced EE PRs: +https://github.com/Kong/kong/pull/11304 +... + +Commits: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/commits.txt +PRs: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/prs.txt +PRs without changelog: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/prs_no_changelog.txt +CE PRs without cherry-pick label: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/prs_no_cherrypick_label.txt +CE PRs without referenced EE cherry-pick PRs: /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO/prs_no_cross_reference.txt + +Remeber to remove /var/folders/wc/fnkx5qmx61l_wx5shysmql5r0000gn/T/outputXXX.JEkGD8AO +``` diff --git a/changelog/create_pr b/changelog/create_pr new file mode 100644 index 00000000000..e765bf78250 --- /dev/null +++ b/changelog/create_pr @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +echo " +Checking existing changelog PR ..." +response=$( + curl -sSL \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "https://api.github.com/repos/${1}/pulls?state=open&base=${2}&head=${4}" \ + | jq -er '.[] | select(.head.ref == "'"${4}"'") | [.html_url, .head.ref] | @tsv' +) + +if [[ -z "${response:+x}" ]] ; then + echo "Not found. Creating ..." + curl -sSL \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "https://api.github.com/repos/${1}/pulls" \ + -d '{"base":"'"${2}"'", "title":"'"Generate ${3} changelog"'","body":"'"Generate ${3} changelog"'","head":"'"${4}"'"}' \ + | jq -r '[.html_url, .head.ref] | @tsv' +else + printf 'Updated existing PR: %s\n' "${response}" +fi diff --git a/changelog/verify-prs b/changelog/verify-prs new file mode 100755 index 00000000000..1cbe0a51b93 --- /dev/null +++ b/changelog/verify-prs @@ -0,0 +1,464 @@ +#!/usr/bin/env bash + +function warn () { + >&2 printf '%s\n' "$@" +} + +function die () { + local st + st="$?" + case $2 in + (*[^0-9]*|'') : ;; + (*) st=$2 ;; + esac + + if [[ -n "$1" ]] ; then warn "$1" ; fi + + warn "WARNING: $0 is terminated" "output dir $out_dir removed" + rm -rf "$out_dir" + + exit "$st" +} + +function show_help () { + local prg + prg="${BASH_SOURCE[0]}" + cat <<-EOF +Version: 0.1 + Author: Zachary Hu (zhucac AT outlook.com) + Script: Compare between two revisions (e.g. tags and branches), and output + commits, PRs, PRs without changelog and CE PRs without CE2EE (experimental). + + A PR should have an associated YML file under 'changelog/unreleased', otherwise + it is printed for verification. 
+ + Regarding CE2EE, if a CE PR has any cross-referenced EE PRs, it is regarded synced + to EE. If strict mode is enabled, associated EE PRs must contain keyword 'cherry' + in the title. If a CE PR is labelled with 'cherry-pick kong-ee', it is regarded synced + to EE. If a CE PR is not synced to EE, it is printed for verification. + + Usage: ${prg} -h + + -v, --verbose Print debug info. + + --strict-filter When checking if a CE PR is synced to EE, + more strict filters are applied. + + --safe-mode When checking if a CE PR is synced to EE, + check one by one. This overrides '--bulk'. + + --bulk N Number of jobs ran concurrency. Default is '5'. + Adjust this value to your CPU cores. + + ${prg} --org-repo kong/kong --base-commit 3.4.2 --head-commit 3.4.3 [--strict-filter] [--bulk 5] [--safe-mode] [-v] + + ORG_REPO=kong/kong BASE_COMMIT=3.4.2 HEAD_COMMIT=3.4.3 $prg +EOF +} + +function set_globals () { + ORG_REPO="${ORG_REPO:-kong/kong}" + BASE_COMMIT="${BASE_COMMIT:-3.4.2.0}" + HEAD_COMMIT="${HEAD_COMMIT:-3.4.2.1}" + + verbose=0 + STRICT_FILTER=0 + SAFE_MODE=0 + + BULK=5 + USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36" + + out_dir=$(mktemp -dt outputXXX) + commits_file="${out_dir}/commits.txt" ; touch "$commits_file" + prs_file="${out_dir}/prs.txt" ; touch "$prs_file" + prs_no_changelog_file="${out_dir}/prs_no_changelog.txt" ; touch "$prs_no_changelog_file" + prs_no_cherrypick_label_file="${out_dir}/prs_no_cherrypick_label.txt" ; touch "$prs_no_cherrypick_label_file" + prs_no_cross_reference_file="${out_dir}/prs_no_cross_reference.txt" ; touch "$prs_no_cross_reference_file" + + num_of_commits=0 + + per_page=100 + num_of_pages=1 +} + +function parse_args () { + while : ; do + case "$1" in + (-h|--help) + show_help + exit + ;; + (-v|--verbose) + set -x + verbose=$(( verbose + 1 )) + ;; + (--org-repo) + if [[ -n "$2" ]] ; then + ORG_REPO="$2" + else + die 'ERROR: "--org-repo" requires a non-empty option argument.' 2 + fi + shift + ;; + (--org-repo=*) + ORG_REPO="${1#--org-repo=}" + if [[ -z "$ORG_REPO" ]] ; then + die 'ERROR: "--org-repo=" requires a non-empty option argument followed immediately.' 2 + fi + ;; + (--base-commit) + if [[ -n "$2" ]] ; then + BASE_COMMIT="$2" + else + die 'ERROR: "--base-commit" requires a non-empty option argument.' 2 + fi + shift + ;; + (--base-commit=*) + BASE_COMMIT="${1#--base-commit=}" + if [[ -z "$BASE_COMMIT" ]] ; then + die 'ERROR: "--base-commit=" requires a non-empty option argument followed immediately.' 2 + fi + ;; + (--head-commit) + if [[ -n "$2" ]] ; then + HEAD_COMMIT="$2" + else + die 'ERROR: "--head-commit" requires a non-empty option argument.' 2 + fi + shift + ;; + (--head-commit=*) + HEAD_COMMIT="${1#--base-commit=}" + if [[ -z "$HEAD_COMMIT" ]] ; then + die 'ERROR: "--head-commit=" requires a non-empty option argument followed immediately.' 2 + fi + ;; + (--bulk) + if [[ -n "$2" ]] ; then + BULK="$2" + else + die 'ERROR: "--bulk" requires a non-empty option argument.' 2 + fi + shift + ;; + (--bulk=*) + BULK="${1#--bulk=}" + if [[ -z "$BULK" ]] ; then + die 'ERROR: "--bulk=" requires a non-empty option argument followed immediately.' 
2 + fi + ;; + (--strict-filter) + STRICT_FILTER=1 + ;; + (--safe-mode) + SAFE_MODE=1 + ;; + (--) + shift + break + ;; + (-?*) + warn "WARNING: unknown option (ignored): $1" + ;; + (*) + break + ;; + esac + + shift + done +} + +function prepare_args () { + parse_args "$@" + + if [[ -z "${ORG_REPO:+x}" ]] ; then + warn "WARNING: ORG_REPO must be provided" + fi + if [[ -z "${BASE_COMMIT:+x}" ]] ; then + warn "WARNING: BASE_COMMIT must be provided" + fi + if [[ -z "${HEAD_COMMIT:+x}" ]] ; then + warn "WARNING: HEAD_COMMIT must be provided" + fi + if [[ -z "${GITHUB_TOKEN:+x}" ]] ; then + warn "WARNING: GITHUB_TOKEN must be provided" + fi + if (( BULK >= 8 )) ; then + warn "WARNING: job concurrency $BULK is too high. May reach the rate limit of GitHub API." + fi + if (( SAFE_MODE )) ; then + warn "WARNING: safe mode enabled. Jobs takes longer time. Take a cup of coffee!" + fi + + printf '%s\n' \ + "Org Repo: ${ORG_REPO}" \ + "Base Commit: ${BASE_COMMIT}" \ + "Head Commit: ${HEAD_COMMIT}" +} + +function get_num_pages_commits () { + local first_paged_response + first_paged_response=$( curl -i -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + "https://api.github.com/repos/${ORG_REPO}/compare/${BASE_COMMIT}...${HEAD_COMMIT}?page=1&per_page=${per_page}" ) + + local status_line + status_line=$( sed -n 1p <<< "$first_paged_response" ) + if ! [[ "$status_line" =~ 200 ]] ; then + die 'ERROR: cannot request GitHub API. Please check arguments or try option "-v"' 2 + fi + + local link_header + link_header=$( awk '/^link:/ { print; exit }' <<< "$first_paged_response" ) + IFS="," read -ra links <<< "$link_header" + + local regex='[^_](page=([0-9]+)).*rel="last"' + for link in "${links[@]}" ; do + if [[ "$link" =~ $regex ]] ; then + num_of_pages="${BASH_REMATCH[2]}" + break + fi + done + + num_of_commits=$( awk 'BEGIN { FS="[[:space:]]+|," } /total_commits/ { print $3; exit }' <<< "$first_paged_response" ) + printf 'number of commits: %s\n' "$num_of_commits" + +} + +function get_commits_prs () { + get_num_pages_commits + printf 'number of pages: %s\n' "$num_of_pages" + printf 'commits per page: %s\n' "$per_page" + + printf '%s\n' "" "PRs:" + for i in $( seq 1 "${num_of_pages}" ) ; do + mapfile -t < <( curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + "https://api.github.com/repos/${ORG_REPO}/compare/${BASE_COMMIT}...${HEAD_COMMIT}?page=${i}&per_page=${per_page}" | \ + jq -r '.commits[].sha' ) + + local max_per_request=17 + local BASE_Q="repo:${ORG_REPO}%20type:pr%20is:merged" + local full_q="$BASE_Q" + local count=0 + for commit in "${MAPFILE[@]}" ; do + printf '%s\n' "${commit:0:9}" >> "$commits_file" + + full_q="${full_q}%20${commit:0:9}" + count=$(( count+1 )) + + if ! 
(( count % max_per_request )) || test "$count" -eq "$per_page" || test "$count" -eq "$num_of_commits" ; then + curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + "https://api.github.com/search/issues?q=$full_q" | jq -r '.items[].html_url' | tee -a "$prs_file" + + full_q="$BASE_Q" + fi + done + done + + sort -uo "$prs_file" "$prs_file" +} + +function check_pr_changelog () { + if [[ -z "${1:+x}" ]] ; then return ; fi + + local changelog_pattern="changelog/unreleased/kong*/*.yml" + local req_url="https://api.github.com/repos/${ORG_REPO}/pulls/PR_NUMBER/files" + local pr_number="${1##https*/}" + req_url="${req_url/PR_NUMBER/$pr_number}" + mapfile -t < <( curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "$req_url" | jq -r '.[].filename' ) + + local has_changelog=0 + for f in "${MAPFILE[@]}" ; do + if [[ "$f" == ${changelog_pattern} ]] ; then has_changelog=1; break; fi + done + if ! (( has_changelog )) ; then echo "$1" | tee -a "$prs_no_changelog_file" ; fi +} + +function check_changelog () { + echo -e "\nPRs without changelog:" + export ORG_REPO="$ORG_REPO" USER_AGENT="$USER_AGENT" prs_no_changelog_file="$prs_no_changelog_file" + export -f check_pr_changelog + if type parallel >/dev/null 2>&1 ; then + parallel -j "$BULK" check_pr_changelog <"$1" + else + warn "WARNING: GNU 'parallel' is not available, fallback to 'xargs'" + <"$1" xargs -P "$BULK" -n1 bash -c 'check_pr_changelog "$@"' _ + fi + sort -uo "$prs_no_changelog_file" "$prs_no_changelog_file" +} + +function check_cherrypick_label () { + if [[ -z "${1:+x}" ]] ; then return ; fi + + local label_pattern="cherry-pick kong-ee" + local req_url="https://api.github.com/repos/${ORG_REPO}/issues/PR_NUMBER/labels" + local pr_number="${1##https://*/}" + req_url="${req_url/PR_NUMBER/$pr_number}" + mapfile -t < <( curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "$req_url" | jq -r '.[].name' ) + + local has_label=0 + for l in "${MAPFILE[@]}" ; do + if [[ "$l" == ${label_pattern} ]] ; then has_label=1; break; fi + done + if ! 
(( has_label )) ; then echo "$1" | tee -a "$prs_no_cherrypick_label_file" ; fi +} + +function check_cross_reference () { + if [[ -z "${1:+x}" ]] ; then return ; fi + + local req_url="https://api.github.com/repos/${ORG_REPO}/issues/PR_NUMBER/timeline" + local pr_number="${1##https://*/}" + req_url="${req_url/PR_NUMBER/$pr_number}" + + local first_paged_response + first_paged_response=$( curl -i -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "${req_url}?page=1&per_page=${per_page}" ) + + local link_header + link_header=$( awk '/^link:/ { print; exit }' <<< "$first_paged_response" ) + IFS="," read -ra links <<< "$link_header" + + local count=1 + local regex='[^_](page=([0-9]+)).*rel="last"' + for link in "${links[@]}" ; do + if [[ "$link" =~ $regex ]] ; then + count="${BASH_REMATCH[2]}" + break + fi + done + + local jq_filter + if (( STRICT_FILTER )) ; then + jq_filter='.[].source.issue | select( (.pull_request != null) and + (.pull_request.html_url | ascii_downcase | contains("kong/kong-ee")) and + (.pull_request.merged_at != null) and + (.title | ascii_downcase | contains("cherry")) ) + | [.pull_request.html_url, .title] + | @tsv' + else + jq_filter='.[].source.issue | select( (.pull_request != null) and + (.pull_request.html_url | ascii_downcase | contains("kong/kong-ee")) and + (.pull_request.merged_at != null) ) + | [.pull_request.html_url, .title] + | @tsv' + fi + + local has_ref=0 + local json_response + for i in $( seq 1 "${count}" ) ; do + json_response=$( curl -sSL \ + -H "User-Agent: ${USER_AGENT}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "${req_url}?page=${i}&per_page=${per_page}" ) + + if jq -er "$jq_filter" <<< "$json_response" >/dev/null + then + has_ref=1 + break + fi + done + + if ! (( has_ref )) ; then echo "$1" | tee -a "$prs_no_cross_reference_file" ; fi +} + +function check_ce2ee () { + if [[ "$ORG_REPO" != "kong/kong" && "$ORG_REPO" != "Kong/kong" ]] ; then + warn "WARNING: only check CE2EE for CE repo. Skip $ORG_REPO" + return + fi + + echo -e "\nPRs without 'cherry-pick kong-ee' label:" + export ORG_REPO="$ORG_REPO" USER_AGENT="$USER_AGENT" prs_no_cherrypick_label_file="$prs_no_cherrypick_label_file" + export -f check_cherrypick_label + if type parallel >/dev/null 2>&1 ; then + parallel -j "$BULK" check_cherrypick_label <"$1" + else + warn "WARNING: GNU 'parallel' is not available, fallback to 'xargs'" + <"$1" xargs -P "$BULK" -n1 bash -c 'check_cherrypick_label "$@"' _ + fi + sort -uo "$prs_no_cherrypick_label_file" "$prs_no_cherrypick_label_file" + + echo -e "\nPRs without cross-referenced EE PRs:" + if (( SAFE_MODE )) ; then + local in_fd + if [[ -f "$1" ]] ; then + : {in_fd}<"$1" + else + : {in_fd}<&0 + warn "WARNING: $1 not a valid file. 
Read from stdin -" + fi + + while read -r -u "$in_fd" ; do + check_cross_reference "$REPLY" + done + + : ${in_fd}<&- + else + export ORG_REPO="$ORG_REPO" USER_AGENT="$USER_AGENT" STRICT_FILTER="$STRICT_FILTER" prs_no_cross_reference_file="$prs_no_cross_reference_file" + export -f check_cross_reference + if type parallel >/dev/null 2>&1 ; then + parallel -j "$BULK" check_cross_reference <"$1" + else + warn "WARNING: GNU 'parallel' is not available, fallback to 'xargs'" + <"$1" xargs -P "$BULK" -n1 bash -c 'check_cross_reference "$@"' _ + fi + fi + sort -uo "$prs_no_cross_reference_file" "$prs_no_cross_reference_file" +} + +function main () { + set -Eeo pipefail + trap die ERR SIGABRT SIGQUIT SIGHUP SIGINT + + set_globals + prepare_args "$@" + + printf '%s\n' "" "comparing between '${BASE_COMMIT}' and '${HEAD_COMMIT}'" + + get_commits_prs + + check_changelog "$prs_file" + + check_ce2ee "$prs_file" + + printf '%s\n' "" \ + "Commits: $commits_file" \ + "PRs: $prs_file" \ + "PRs without changelog: $prs_no_changelog_file" \ + "CE PRs without cherry-pick label: $prs_no_cherrypick_label_file" \ + "CE PRs without referenced EE cherry-pick PRs: $prs_no_cross_reference_file" \ + "" "Remeber to remove $out_dir" + + trap '' EXIT +} + +if (( "$#" )) ; then main "$@" ; else show_help ; fi From ab7232ea98be93f1c4a69482aa9995808df6819a Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Fri, 23 Feb 2024 12:33:55 +0200 Subject: [PATCH 58/91] chore(deps): bump pcre2 from 10.42 to 10.43 (#12603) ### Summary There are quite a lot of changes in this release (see ChangeLog and git log for a list). Those that are not bugfixes or code tidies are: * The JIT code no longer supports ARMv5 architecture. * A new function pcre2_get_match_data_heapframes_size() for finer heap control. * New option flags to restrict the interaction between ASCII and non-ASCII characters for caseless matching and \d and friends. There are also new pattern constructs to control these flags from within a pattern. * Upgrade to Unicode 15.0.0. * Treat a NULL pattern with zero length as an empty string. * Added support for limited-length variable-length lookbehind assertions, with a default maximum length of 255 characters (same as Perl) but with a function to adjust the limit. * Support for LoongArch in JIT. * Perl changed the meaning of (for example) {,3} which did not used to be recognized as a quantifier. Now it means {0,3} and PCRE2 has also changed. Note that {,} is still not a quantifier. * Following Perl, allow spaces and tabs after { and before } in all Perl- compatible items that use braces, and also around commas in quantifiers. The one exception in PCRE2 is \u{...}, which is from ECMAScript, not Perl, and PCRE2 follows ECMAScript usage. * Changed the meaning of \w and its synonyms and derivatives (\b and \B) in UCP mode to follow Perl. It now matches characters whose general categories are L or N or whose particular categories are Mn (non-spacing mark) or Pc (combining punctuation). * Changed the default meaning of [:xdigit:] in UCP mode to follow Perl. It now matches the "fullwidth" versions of hex digits. PCRE2_EXTRA_ASCII_DIGIT can be used to keep it ASCII only. * Make PCRE2_UCP the default in UTF mode in pcre2grep and add -no_ucp, --case-restrict and --posix-digit. * Add --group-separator and --no-group-separator to pcre2grep. 
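As a concrete illustration of one of the behavioural changes listed above (the quantifier parsing), here is a minimal Lua sketch, assuming an OpenResty/Kong build whose `ngx.re` is linked against the bumped PCRE2 10.43; the pattern and subject are made up for illustration only:

```lua
-- With PCRE2 10.43, "{,3}" is parsed as a quantifier meaning "{0,3}"
-- (following Perl); older PCRE treated "{,3}" as literal text, so this
-- pattern would not have matched the subject below.
local m, err = ngx.re.match("abbb", "ab{,3}", "jo")
-- On a PCRE2 10.43 build, m[0] == "abbb"; on older builds m is nil (no match).
```

This is only a behavioural note for reviewers; no Kong code in this patch relies on the new syntax.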
Signed-off-by: Aapo Talvensaari --- .requirements | 4 ++-- build/openresty/pcre/pcre_repositories.bzl | 2 +- changelog/unreleased/kong/bump-pcre.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.requirements b/.requirements index db51855b150..286634dc112 100644 --- a/.requirements +++ b/.requirements @@ -3,7 +3,7 @@ KONG_PACKAGE_NAME=kong OPENRESTY=1.25.3.1 LUAROCKS=3.9.2 OPENSSL=3.2.1 -PCRE=10.42 +PCRE=10.43 LIBEXPAT=2.5.0 LUA_KONG_NGINX_MODULE=4fbc3ddc7dcbc706ed286b95344f3cb6da17e637 # 0.8.0 @@ -19,4 +19,4 @@ WASMTIME=14.0.3 V8=10.5.18 NGX_BROTLI=a71f9312c2deb28875acc7bacfdd5695a111aa53 # master branch of Jan 23, 2024 -BROTLI=ed738e842d2fbdf2d6459e39267a633c4a9b2f5d # master branch of brotli deps submodule of Jan 23, 2024 \ No newline at end of file +BROTLI=ed738e842d2fbdf2d6459e39267a633c4a9b2f5d # master branch of brotli deps submodule of Jan 23, 2024 diff --git a/build/openresty/pcre/pcre_repositories.bzl b/build/openresty/pcre/pcre_repositories.bzl index bb593ffc7ad..b1ad394d7e1 100644 --- a/build/openresty/pcre/pcre_repositories.bzl +++ b/build/openresty/pcre/pcre_repositories.bzl @@ -12,7 +12,7 @@ def pcre_repositories(): name = "pcre", build_file = "//build/openresty/pcre:BUILD.pcre.bazel", strip_prefix = "pcre2-" + version, - sha256 = "c33b418e3b936ee3153de2c61cc638e7e4fe3156022a5c77d0711bcbb9d64f1f", + sha256 = "889d16be5abb8d05400b33c25e151638b8d4bac0e2d9c76e9d6923118ae8a34e", urls = [ "https://github.com/PCRE2Project/pcre2/releases/download/pcre2-" + version + "/pcre2-" + version + ".tar.gz", ], diff --git a/changelog/unreleased/kong/bump-pcre.yml b/changelog/unreleased/kong/bump-pcre.yml index b397c5a153c..c5cea017350 100644 --- a/changelog/unreleased/kong/bump-pcre.yml +++ b/changelog/unreleased/kong/bump-pcre.yml @@ -1,3 +1,3 @@ -message: "Bumped PCRE from the legacy libpcre 8.45 to libpcre2 10.42" +message: "Bumped PCRE from the legacy libpcre 8.45 to libpcre2 10.43" type: dependency scope: Core From 7e31f0863ce3314d9b67457d1ae74fea09075ffc Mon Sep 17 00:00:00 2001 From: Murillo <103451714+gruceo@users.noreply.github.com> Date: Thu, 22 Feb 2024 14:40:12 -0300 Subject: [PATCH 59/91] fix(api): avoid returning 405 on /schemas/vaults/:name This fixes an issue where calling the endpoint `POST /schemas/vaults/validate` was conflicting with the endpoint `/schemas/vaults/:name` which only has GET implemented, hence resulting in a 405. By explicting defining a new endpoint `/schemas/vaults/validate`, Lapis framework should take care of always choosing it over `/schemas/vaults/:name`. KAG-3699 --- .../kong/fix_api_405_vaults_validate_endpoint.yml | 3 +++ kong/api/routes/kong.lua | 5 +++++ .../04-admin_api/02-kong_routes_spec.lua | 10 ++++++++++ 3 files changed, 18 insertions(+) create mode 100644 changelog/unreleased/kong/fix_api_405_vaults_validate_endpoint.yml diff --git a/changelog/unreleased/kong/fix_api_405_vaults_validate_endpoint.yml b/changelog/unreleased/kong/fix_api_405_vaults_validate_endpoint.yml new file mode 100644 index 00000000000..3c102e6a3ff --- /dev/null +++ b/changelog/unreleased/kong/fix_api_405_vaults_validate_endpoint.yml @@ -0,0 +1,3 @@ +message: "**Admin API**: fixed an issue where calling the endpoint `POST /schemas/vaults/validate` was conflicting with the endpoint `/schemas/vaults/:name` which only has GET implemented, hence resulting in a 405." 
+type: bugfix +scope: Admin API diff --git a/kong/api/routes/kong.lua b/kong/api/routes/kong.lua index a80615302c3..d2fa8a59443 100644 --- a/kong/api/routes/kong.lua +++ b/kong/api/routes/kong.lua @@ -200,6 +200,11 @@ return { return validate_schema("plugins", self.params) end }, + ["/schemas/vaults/validate"] = { + POST = function(self, db, helpers) + return validate_schema("vaults", self.params) + end + }, ["/schemas/:db_entity_name/validate"] = { POST = function(self, db, helpers) local db_entity_name = self.params.db_entity_name diff --git a/spec/02-integration/04-admin_api/02-kong_routes_spec.lua b/spec/02-integration/04-admin_api/02-kong_routes_spec.lua index 675e00eb58b..7c28d682fac 100644 --- a/spec/02-integration/04-admin_api/02-kong_routes_spec.lua +++ b/spec/02-integration/04-admin_api/02-kong_routes_spec.lua @@ -485,6 +485,16 @@ describe("Admin API - Kong routes with strategy #" .. strategy, function() local json = cjson.decode(body) assert.same({ message = "No vault named 'not-present'" }, json) end) + + it("does not return 405 on /schemas/vaults/validate", function() + local res = assert(client:send { + method = "POST", + path = "/schemas/vaults/validate", + }) + local body = assert.res_status(400, res) + local json = cjson.decode(body) + assert.same("schema violation (name: required field missing)", json.message) + end) end) describe("/schemas/:entity", function() From 271679777ce0f5a076a269ac0221b921cc35a210 Mon Sep 17 00:00:00 2001 From: Zachary Hu Date: Mon, 26 Feb 2024 12:45:00 +0800 Subject: [PATCH 60/91] chore(release): unify changelog PR reference links --- changelog/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog/Makefile b/changelog/Makefile index 9f88b59c9cb..8909616bcd6 100644 --- a/changelog/Makefile +++ b/changelog/Makefile @@ -54,7 +54,7 @@ generate: --repo-path . \ --changelog-paths $(VERSION)/kong,$(UNRELEASED_DIR)/kong \ --title Kong \ - --github-issue-repo Kong/kong \ + --github-issue-repo $(OWNER_REPO) \ --github-api-repo $(OWNER_REPO) \ --with-jiras \ >> $(VERSION).md; \ @@ -63,7 +63,7 @@ generate: --repo-path . \ --changelog-paths $(UNRELEASED_DIR)/kong \ --title Kong \ - --github-issue-repo Kong/kong \ + --github-issue-repo $(OWNER_REPO) \ --github-api-repo $(OWNER_REPO) \ --with-jiras \ >> $(VERSION).md; \ From ceef39834e3a09ae54534d53e7b48e45133251f9 Mon Sep 17 00:00:00 2001 From: Zachary Hu Date: Mon, 26 Feb 2024 15:58:02 +0800 Subject: [PATCH 61/91] chore(release): do not generate changelogs if there is no yml file in the changelog directory --- changelog/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog/Makefile b/changelog/Makefile index 8909616bcd6..82c447373eb 100644 --- a/changelog/Makefile +++ b/changelog/Makefile @@ -48,7 +48,7 @@ generate: @rm -f $(VERSION).md @touch $(VERSION).md - @if [ -d "$(UNRELEASED_DIR)/kong" ]; then \ + @if [ -d "$(UNRELEASED_DIR)/kong" ] && [ -n "$$(shopt -s nullglob; echo $(UNRELEASED_DIR)/kong/*.yml)" ] ; then \ if [ -f "$(VERSION)/$(VERSION).md" ]; then \ changelog --debug=$(DEBUG) generate \ --repo-path . \ @@ -69,7 +69,7 @@ generate: >> $(VERSION).md; \ fi \ fi - @if [ -d "$(UNRELEASED_DIR)/kong-manager" ]; then \ + @if [ -d "$(UNRELEASED_DIR)/kong-manager" ] && [ -n "$$(shopt -s nullglob; echo $(UNRELEASED_DIR)/kong-manager/*.yml)" ] ; then \ if [ -f "$(VERSION)/$(VERSION).md" ]; then \ changelog --debug=$(DEBUG) generate \ --repo-path . 
\ From 7473c81c936c79037f6a5266f9b42a35de2275a5 Mon Sep 17 00:00:00 2001 From: Zachary Hu Date: Mon, 26 Feb 2024 15:58:28 +0800 Subject: [PATCH 62/91] chore(release): add .gitkeep to empty changelog dir when generating the changelog PR --- changelog/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog/Makefile b/changelog/Makefile index 82c447373eb..d7cd67bdace 100644 --- a/changelog/Makefile +++ b/changelog/Makefile @@ -102,6 +102,7 @@ push_changelog: mkdir -p $(VERSION)/$$i ; \ git mv -k $(UNRELEASED_DIR)/$$i/*.yml $(VERSION)/$$i/ ; \ touch $(UNRELEASED_DIR)/$$i/.gitkeep ; \ + touch $(VERSION)/$$i/.gitkeep ; \ done @git add . @git commit -m "docs(release): genereate $(VERSION) changelog" From 6470d9bf925ef71966103d347006b47db4ab6f69 Mon Sep 17 00:00:00 2001 From: Zachary Hu <6426329+outsinre@users.noreply.github.com> Date: Mon, 26 Feb 2024 19:21:21 +0800 Subject: [PATCH 63/91] fix(core): disallow delete or create workspaces (#12374) Fix FTI-5620 Co-authored-by: Guilherme Salazar --- kong/db/schema/entities/workspaces.lua | 1 + .../04-admin_api/25-workspaces_spec.lua | 50 +++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 spec/02-integration/04-admin_api/25-workspaces_spec.lua diff --git a/kong/db/schema/entities/workspaces.lua b/kong/db/schema/entities/workspaces.lua index 79f45b10d5b..153eeb57f69 100644 --- a/kong/db/schema/entities/workspaces.lua +++ b/kong/db/schema/entities/workspaces.lua @@ -7,6 +7,7 @@ return { cache_key = { "name" }, endpoint_key = "name", dao = "kong.db.dao.workspaces", + generate_admin_api = false, fields = { { id = typedefs.uuid }, diff --git a/spec/02-integration/04-admin_api/25-workspaces_spec.lua b/spec/02-integration/04-admin_api/25-workspaces_spec.lua new file mode 100644 index 00000000000..bc0d4e5ac51 --- /dev/null +++ b/spec/02-integration/04-admin_api/25-workspaces_spec.lua @@ -0,0 +1,50 @@ +local helpers = require "spec.helpers" +local cjson = require "cjson" + +for _, strategy in helpers.each_strategy() do + describe("Admin API - workspaces #" .. strategy, function() + local db, admin_client + + lazy_setup(function() + _, db = helpers.get_db_utils(strategy,{ "workspaces" }) + + assert(helpers.start_kong({ + database = strategy, + })) + end) + + lazy_teardown(function() + helpers.stop_kong() + end) + + before_each(function() + admin_client = helpers.admin_client() + end) + + after_each(function() + if admin_client then admin_client:close() end + end) + + it("has no admin api", function() + finally(function() db:truncate("workspaces") end) + + local res = assert(admin_client:post("/workspaces", { + body = { name = "jim" }, + headers = {["Content-Type"] = "application/json"}, + })) + + local body = assert.res_status(404, res) + body = cjson.decode(body) + assert.match("Not found", body.message) + end) + + it("disallow deletion", function() + finally(function() db:truncate("workspaces") end) + + local res = assert(admin_client:delete("/workspaces/default")) + local body = assert.res_status(404, res) + body = cjson.decode(body) + assert.match("Not found", body.message) + end) + end) +end From bb228ffad0b41889f4b972788eed0176568f39cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Nowak?= Date: Mon, 26 Feb 2024 12:54:21 +0100 Subject: [PATCH 64/91] chore(tests): remove redis>=v6 checks in tests (#12617) There were a lot of checks for redis version being at least 6.0.0 It's been 4 years since redis 6.0.0 release and we don't test against lower versions anymore so these checks are no longer needed. 
KAG-2130 --- .../23-rate-limiting/05-integration_spec.lua | 242 ++++++++--------- .../05-integration_spec.lua | 244 ++++++++---------- 2 files changed, 218 insertions(+), 268 deletions(-) diff --git a/spec/03-plugins/23-rate-limiting/05-integration_spec.lua b/spec/03-plugins/23-rate-limiting/05-integration_spec.lua index 1ec13be7900..7f0239aa499 100644 --- a/spec/03-plugins/23-rate-limiting/05-integration_spec.lua +++ b/spec/03-plugins/23-rate-limiting/05-integration_spec.lua @@ -1,5 +1,4 @@ local helpers = require "spec.helpers" -local version = require "version" local cjson = require "cjson" local redis_helper = require "spec.helpers.redis_helper" @@ -23,7 +22,6 @@ describe("Plugin: rate-limiting (integration)", function() local client local bp local red - local red_version lazy_setup(function() bp = helpers.get_db_utils(nil, { @@ -33,7 +31,7 @@ describe("Plugin: rate-limiting (integration)", function() }, { "rate-limiting" }) - red, red_version = redis_helper.connect(REDIS_HOST, REDIS_PORT) + red = redis_helper.connect(REDIS_HOST, REDIS_PORT) end) lazy_teardown(function() @@ -77,10 +75,8 @@ describe("Plugin: rate-limiting (integration)", function() lazy_setup(function() red:flushall() - if red_version >= version("6.0.0") then - redis_helper.add_admin_user(red, REDIS_USER_VALID, REDIS_PASSWORD) - redis_helper.add_basic_user(red, REDIS_USER_INVALID, REDIS_PASSWORD) - end + redis_helper.add_admin_user(red, REDIS_USER_VALID, REDIS_PASSWORD) + redis_helper.add_basic_user(red, REDIS_USER_INVALID, REDIS_PASSWORD) bp = helpers.get_db_utils(nil, { "routes", @@ -135,56 +131,53 @@ describe("Plugin: rate-limiting (integration)", function() }, }) - if red_version >= version("6.0.0") then - local route3 = assert(bp.routes:insert { - hosts = { "redistest3.test" }, - }) - assert(bp.plugins:insert { - name = "rate-limiting", - route = { id = route3.id }, - config = { - minute = 2, -- Handle multiple tests - policy = "redis", - redis = { - host = REDIS_HOST, - port = config.redis_port, - username = REDIS_USER_VALID, - password = REDIS_PASSWORD, - database = REDIS_DB_3, -- ensure to not get a pooled authenticated connection by using a different db - ssl = config.redis_ssl, - ssl_verify = config.redis_ssl_verify, - server_name = config.redis_server_name, - timeout = 10000, - }, - fault_tolerant = false, - }, - }) - - local route4 = assert(bp.routes:insert { - hosts = { "redistest4.test" }, - }) - assert(bp.plugins:insert { - name = "rate-limiting", - route = { id = route4.id }, - config = { - minute = 1, - policy = "redis", - redis = { - host = REDIS_HOST, - port = config.redis_port, - username = REDIS_USER_INVALID, - password = REDIS_PASSWORD, - database = REDIS_DB_4, -- ensure to not get a pooled authenticated connection by using a different db - ssl = config.redis_ssl, - ssl_verify = config.redis_ssl_verify, - server_name = config.redis_server_name, - timeout = 10000, - }, - fault_tolerant = false, + local route3 = assert(bp.routes:insert { + hosts = { "redistest3.test" }, + }) + assert(bp.plugins:insert { + name = "rate-limiting", + route = { id = route3.id }, + config = { + minute = 2, -- Handle multiple tests + policy = "redis", + redis = { + host = REDIS_HOST, + port = config.redis_port, + username = REDIS_USER_VALID, + password = REDIS_PASSWORD, + database = REDIS_DB_3, -- ensure to not get a pooled authenticated connection by using a different db + ssl = config.redis_ssl, + ssl_verify = config.redis_ssl_verify, + server_name = config.redis_server_name, + timeout = 10000, }, - }) - end + 
fault_tolerant = false, + }, + }) + local route4 = assert(bp.routes:insert { + hosts = { "redistest4.test" }, + }) + assert(bp.plugins:insert { + name = "rate-limiting", + route = { id = route4.id }, + config = { + minute = 1, + policy = "redis", + redis = { + host = REDIS_HOST, + port = config.redis_port, + username = REDIS_USER_INVALID, + password = REDIS_PASSWORD, + database = REDIS_DB_4, -- ensure to not get a pooled authenticated connection by using a different db + ssl = config.redis_ssl, + ssl_verify = config.redis_ssl_verify, + server_name = config.redis_server_name, + timeout = 10000, + }, + fault_tolerant = false, + }, + }) assert(helpers.start_kong({ nginx_conf = "spec/fixtures/custom_nginx.template", @@ -195,10 +188,8 @@ describe("Plugin: rate-limiting (integration)", function() lazy_teardown(function() helpers.stop_kong() - if red_version >= version("6.0.0") then - redis_helper.remove_user(red, REDIS_USER_VALID) - redis_helper.remove_user(red, REDIS_USER_INVALID) - end + redis_helper.remove_user(red, REDIS_USER_VALID) + redis_helper.remove_user(red, REDIS_USER_INVALID) end) it("connection pool respects database setting", function() @@ -210,11 +201,10 @@ describe("Plugin: rate-limiting (integration)", function() assert.equal(0, tonumber(size_1)) assert.equal(0, tonumber(size_2)) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - assert.equal(0, tonumber(size_3)) - end + + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) local res = assert(client:send { method = "GET", @@ -239,11 +229,10 @@ describe("Plugin: rate-limiting (integration)", function() assert.equal(1, tonumber(size_1)) assert.equal(0, tonumber(size_2)) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - assert.equal(0, tonumber(size_3)) - end + + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) -- rate-limiting plugin will reuses the redis connection local res = assert(client:send { @@ -269,76 +258,63 @@ describe("Plugin: rate-limiting (integration)", function() assert.equal(1, tonumber(size_1)) assert.equal(1, tonumber(size_2)) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - assert.equal(0, tonumber(size_3)) - end - - if red_version >= version("6.0.0") then - -- rate-limiting plugin will reuses the redis connection - local res = assert(client:send { - method = "GET", - path = "/status/200", - headers = { - ["Host"] = "redistest3.test" - } - }) - assert.res_status(200, res) - - -- Wait for async timer to increment the limit - - ngx.sleep(SLEEP_TIME) - - assert(red:select(REDIS_DB_1)) - size_1 = assert(red:dbsize()) - - assert(red:select(REDIS_DB_2)) - size_2 = assert(red:dbsize()) - - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - - -- TEST: All DBs should now have one hit, because the - -- plugin correctly chose to select the database it is - -- configured to hit - - assert.is_true(tonumber(size_1) > 0) - assert.is_true(tonumber(size_2) > 0) - assert.is_true(tonumber(size_3) > 0) - end + + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) + + -- rate-limiting plugin will reuses the redis connection + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "redistest3.test" + } + }) + 
assert.res_status(200, res) + + -- Wait for async timer to increment the limit + + ngx.sleep(SLEEP_TIME) + + assert(red:select(REDIS_DB_1)) + size_1 = assert(red:dbsize()) + + assert(red:select(REDIS_DB_2)) + size_2 = assert(red:dbsize()) + + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + + -- TEST: All DBs should now have one hit, because the + -- plugin correctly chose to select the database it is + -- configured to hit + + assert.is_true(tonumber(size_1) > 0) + assert.is_true(tonumber(size_2) > 0) + assert.is_true(tonumber(size_3) > 0) end) it("authenticates and executes with a valid redis user having proper ACLs", function() - if red_version >= version("6.0.0") then - local res = assert(client:send { - method = "GET", - path = "/status/200", - headers = { - ["Host"] = "redistest3.test" - } - }) - assert.res_status(200, res) - else - ngx.log(ngx.WARN, "Redis v" .. tostring(red_version) .. " does not support ACL functions " .. - "'authenticates and executes with a valid redis user having proper ACLs' will be skipped") - end + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "redistest3.test" + } + }) + assert.res_status(200, res) end) it("fails to rate-limit for a redis user with missing ACLs", function() - if red_version >= version("6.0.0") then - local res = assert(client:send { - method = "GET", - path = "/status/200", - headers = { - ["Host"] = "redistest4.test" - } - }) - assert.res_status(500, res) - else - ngx.log(ngx.WARN, "Redis v" .. tostring(red_version) .. " does not support ACL functions " .. - "'fails to rate-limit for a redis user with missing ACLs' will be skipped") - end + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "redistest4.test" + } + }) + assert.res_status(500, res) end) end) end diff --git a/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua b/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua index d4e3cef0d0b..2e17d1f196f 100644 --- a/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua +++ b/spec/03-plugins/24-response-rate-limiting/05-integration_spec.lua @@ -1,11 +1,7 @@ local helpers = require "spec.helpers" -local version = require "version" local cjson = require "cjson" local redis_helper = require "spec.helpers.redis_helper" -local tostring = tostring - - local REDIS_HOST = helpers.redis_host local REDIS_PORT = helpers.redis_port local REDIS_SSL_PORT = helpers.redis_ssl_port @@ -27,7 +23,6 @@ describe("Plugin: rate-limiting (integration)", function() local client local bp local red - local red_version lazy_setup(function() -- only to run migrations @@ -38,7 +33,7 @@ describe("Plugin: rate-limiting (integration)", function() }, { "response-ratelimiting", }) - red, red_version = redis_helper.connect(REDIS_HOST, REDIS_PORT) + red = redis_helper.connect(REDIS_HOST, REDIS_PORT) end) lazy_teardown(function() @@ -80,10 +75,8 @@ describe("Plugin: rate-limiting (integration)", function() lazy_setup(function() red:flushall() - if red_version >= version("6.0.0") then - redis_helper.add_admin_user(red, REDIS_USER_VALID, REDIS_PASSWORD) - redis_helper.add_basic_user(red, REDIS_USER_INVALID, REDIS_PASSWORD) - end + redis_helper.add_admin_user(red, REDIS_USER_VALID, REDIS_PASSWORD) + redis_helper.add_basic_user(red, REDIS_USER_INVALID, REDIS_PASSWORD) bp = helpers.get_db_utils(nil, { "routes", @@ -137,55 +130,53 @@ describe("Plugin: rate-limiting (integration)", function() }, }) - if red_version 
>= version("6.0.0") then - local route3 = assert(bp.routes:insert { - hosts = { "redistest3.test" }, - }) - assert(bp.plugins:insert { - name = "response-ratelimiting", - route = { id = route3.id }, - config = { - policy = "redis", - redis = { - host = REDIS_HOST, - port = config.redis_port, - username = REDIS_USER_VALID, - password = REDIS_PASSWORD, - database = REDIS_DB_3, - ssl = config.redis_ssl, - ssl_verify = config.redis_ssl_verify, - server_name = config.redis_server_name, - timeout = 10000, - }, - fault_tolerant = false, - limits = { video = { minute = 6 } }, + local route3 = assert(bp.routes:insert { + hosts = { "redistest3.test" }, + }) + assert(bp.plugins:insert { + name = "response-ratelimiting", + route = { id = route3.id }, + config = { + policy = "redis", + redis = { + host = REDIS_HOST, + port = config.redis_port, + username = REDIS_USER_VALID, + password = REDIS_PASSWORD, + database = REDIS_DB_3, + ssl = config.redis_ssl, + ssl_verify = config.redis_ssl_verify, + server_name = config.redis_server_name, + timeout = 10000, }, - }) - - local route4 = assert(bp.routes:insert { - hosts = { "redistest4.test" }, - }) - assert(bp.plugins:insert { - name = "response-ratelimiting", - route = { id = route4.id }, - config = { - policy = "redis", - redis = { - host = REDIS_HOST, - port = config.redis_port, - username = REDIS_USER_INVALID, - password = REDIS_PASSWORD, - database = REDIS_DB_4, - ssl = config.redis_ssl, - ssl_verify = config.redis_ssl_verify, - server_name = config.redis_server_name, - timeout = 10000, - }, - fault_tolerant = false, - limits = { video = { minute = 6 } }, + fault_tolerant = false, + limits = { video = { minute = 6 } }, + }, + }) + + local route4 = assert(bp.routes:insert { + hosts = { "redistest4.test" }, + }) + assert(bp.plugins:insert { + name = "response-ratelimiting", + route = { id = route4.id }, + config = { + policy = "redis", + redis = { + host = REDIS_HOST, + port = config.redis_port, + username = REDIS_USER_INVALID, + password = REDIS_PASSWORD, + database = REDIS_DB_4, + ssl = config.redis_ssl, + ssl_verify = config.redis_ssl_verify, + server_name = config.redis_server_name, + timeout = 10000, }, - }) - end + fault_tolerant = false, + limits = { video = { minute = 6 } }, + }, + }) assert(helpers.start_kong({ nginx_conf = "spec/fixtures/custom_nginx.template", @@ -196,10 +187,8 @@ describe("Plugin: rate-limiting (integration)", function() lazy_teardown(function() helpers.stop_kong() - if red_version >= version("6.0.0") then - redis_helper.remove_user(red, REDIS_USER_VALID) - redis_helper.remove_user(red, REDIS_USER_INVALID) - end + redis_helper.remove_user(red, REDIS_USER_VALID) + redis_helper.remove_user(red, REDIS_USER_INVALID) end) it("connection pool respects database setting", function() @@ -211,11 +200,10 @@ describe("Plugin: rate-limiting (integration)", function() assert.equal(0, tonumber(size_1)) assert.equal(0, tonumber(size_2)) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - assert.equal(0, tonumber(size_3)) - end + + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) local res = assert(client:send { method = "GET", @@ -242,11 +230,10 @@ describe("Plugin: rate-limiting (integration)", function() assert.is_true(tonumber(size_1) > 0) assert.equal(0, tonumber(size_2)) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - assert.equal(0, tonumber(size_3)) - end 
+ + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) -- response-ratelimiting plugin reuses the redis connection local res = assert(client:send { @@ -274,78 +261,65 @@ describe("Plugin: rate-limiting (integration)", function() assert.is_true(tonumber(size_1) > 0) assert.is_true(tonumber(size_2) > 0) - if red_version >= version("6.0.0") then - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - assert.equal(0, tonumber(size_3)) - end + + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + assert.equal(0, tonumber(size_3)) -- response-ratelimiting plugin reuses the redis connection - if red_version >= version("6.0.0") then - local res = assert(client:send { - method = "GET", - path = "/response-headers?x-kong-limit=video=1", - headers = { - ["Host"] = "redistest3.test" - } - }) - assert.res_status(200, res) - assert.equal(6, tonumber(res.headers["x-ratelimit-limit-video-minute"])) - assert.equal(5, tonumber(res.headers["x-ratelimit-remaining-video-minute"])) - - -- Wait for async timer to increment the limit - - ngx.sleep(SLEEP_TIME) - - assert(red:select(REDIS_DB_1)) - size_1 = assert(red:dbsize()) - - assert(red:select(REDIS_DB_2)) - size_2 = assert(red:dbsize()) - - assert(red:select(REDIS_DB_3)) - local size_3 = assert(red:dbsize()) - - -- TEST: All DBs should now have one hit, because the - -- plugin correctly chose to select the database it is - -- configured to hit - - assert.is_true(tonumber(size_1) > 0) - assert.is_true(tonumber(size_2) > 0) - assert.is_true(tonumber(size_3) > 0) - end + local res = assert(client:send { + method = "GET", + path = "/response-headers?x-kong-limit=video=1", + headers = { + ["Host"] = "redistest3.test" + } + }) + assert.res_status(200, res) + assert.equal(6, tonumber(res.headers["x-ratelimit-limit-video-minute"])) + assert.equal(5, tonumber(res.headers["x-ratelimit-remaining-video-minute"])) + + -- Wait for async timer to increment the limit + + ngx.sleep(SLEEP_TIME) + + assert(red:select(REDIS_DB_1)) + size_1 = assert(red:dbsize()) + + assert(red:select(REDIS_DB_2)) + size_2 = assert(red:dbsize()) + + assert(red:select(REDIS_DB_3)) + local size_3 = assert(red:dbsize()) + + -- TEST: All DBs should now have one hit, because the + -- plugin correctly chose to select the database it is + -- configured to hit + + assert.is_true(tonumber(size_1) > 0) + assert.is_true(tonumber(size_2) > 0) + assert.is_true(tonumber(size_3) > 0) end) it("authenticates and executes with a valid redis user having proper ACLs", function() - if red_version >= version("6.0.0") then - local res = assert(client:send { - method = "GET", - path = "/status/200", - headers = { - ["Host"] = "redistest3.test" - } - }) - assert.res_status(200, res) - else - ngx.log(ngx.WARN, "Redis v" .. tostring(red_version) .. " does not support ACL functions " .. - "'authenticates and executes with a valid redis user having proper ACLs' will be skipped") - end + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "redistest3.test" + } + }) + assert.res_status(200, res) end) it("fails to rate-limit for a redis user with missing ACLs", function() - if red_version >= version("6.0.0") then - local res = assert(client:send { - method = "GET", - path = "/status/200", - headers = { - ["Host"] = "redistest4.test" - } - }) - assert.res_status(500, res) - else - ngx.log(ngx.WARN, "Redis v" .. tostring(red_version) .. " does not support ACL functions " .. 
- "'fails to response rate-limit for a redis user with missing ACLs' will be skipped") - end + local res = assert(client:send { + method = "GET", + path = "/status/200", + headers = { + ["Host"] = "redistest4.test" + } + }) + assert.res_status(500, res) end) end) end -- end for each strategy From fde38744022faf6b75be66be321d13b2ad0caa59 Mon Sep 17 00:00:00 2001 From: Chrono Date: Tue, 27 Feb 2024 10:14:54 +0800 Subject: [PATCH 65/91] refactor(router/atc): move assertion to unlikely path (#12466) --- kong/router/fields.lua | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/kong/router/fields.lua b/kong/router/fields.lua index 8bcdd7fbcb7..d975ce465c8 100644 --- a/kong/router/fields.lua +++ b/kong/router/fields.lua @@ -197,8 +197,10 @@ else -- stream end -- is_http --- stream subsystem need not to generate func -local get_field_accessor = function(funcs, field) end +-- stream subsystem needs not to generate func +local function get_field_accessor(funcs, field) + error("unknown router matching schema field: " .. field) +end if is_http then @@ -359,7 +361,8 @@ if is_http then return f end -- if field:sub(1, HTTP_SEGMENTS_PREFIX_LEN) - -- others return nil + -- others are error + error("unknown router matching schema field: " .. field) end end -- is_http @@ -451,8 +454,6 @@ function _M:get_value(field, params, ctx) local func = FIELDS_FUNCS[field] or get_field_accessor(self.funcs, field) - assert(func, "unknown router matching schema field: " .. field) - return func(params, ctx) end From 184250b6f1e99bbd4447f5d9bf541ba2f8362f65 Mon Sep 17 00:00:00 2001 From: Qi Date: Tue, 27 Feb 2024 10:16:23 +0800 Subject: [PATCH 66/91] fix(request-debugging): add missing `router` section of the timing output (#12234) --- .../fix-missing-router-section-of-request-debugging.yml | 3 +++ kong/timing/init.lua | 8 ++++++++ .../21-request-debug/01-request-debug_spec.lua | 4 ++++ 3 files changed, 15 insertions(+) create mode 100644 changelog/unreleased/kong/fix-missing-router-section-of-request-debugging.yml diff --git a/changelog/unreleased/kong/fix-missing-router-section-of-request-debugging.yml b/changelog/unreleased/kong/fix-missing-router-section-of-request-debugging.yml new file mode 100644 index 00000000000..7ae106f21bb --- /dev/null +++ b/changelog/unreleased/kong/fix-missing-router-section-of-request-debugging.yml @@ -0,0 +1,3 @@ +message: Fix the missing router section for the output of the request-debugging +type: bugfix +scope: Core diff --git a/kong/timing/init.lua b/kong/timing/init.lua index 8b15304c319..9b9c5df3199 100644 --- a/kong/timing/init.lua +++ b/kong/timing/init.lua @@ -306,6 +306,14 @@ function _M.register_hooks() _M.leave_context() -- leave plugin_id _M.leave_context() -- leave plugin_name end) + + req_dyn_hook.hook("timing", "before:router", function() + _M.enter_context("router") + end) + + req_dyn_hook.hook("timing", "after:router", function() + _M.leave_context() -- leave router + end) end diff --git a/spec/02-integration/21-request-debug/01-request-debug_spec.lua b/spec/02-integration/21-request-debug/01-request-debug_spec.lua index 8be19151782..13d626f474c 100644 --- a/spec/02-integration/21-request-debug/01-request-debug_spec.lua +++ b/spec/02-integration/21-request-debug/01-request-debug_spec.lua @@ -535,6 +535,7 @@ describe(desc, function() assert.truthy(header_output.child.rewrite) assert.truthy(header_output.child.access) assert.truthy(header_output.child.access.child.dns) -- upstream is resolved in access phase + 
assert.truthy(header_output.child.access.child.router) -- router is executed in access phase assert(header_output.child.access.child.dns.child.localhost.child.resolve.cache_hit ~= nil, "dns cache hit should be recorded") assert.truthy(header_output.child.balancer) assert.truthy(header_output.child.header_filter) @@ -542,6 +543,7 @@ describe(desc, function() assert.truthy(log_output.child.rewrite) assert.truthy(log_output.child.access) assert.truthy(log_output.child.access.child.dns) -- upstream is resolved in access phase + assert.truthy(log_output.child.access.child.router) -- router is executed in access phase assert(log_output.child.access.child.dns.child.localhost.child.resolve.cache_hit ~= nil, "dns cache hit should be recorded") assert.truthy(log_output.child.balancer) assert.truthy(log_output.child.header_filter) @@ -573,11 +575,13 @@ describe(desc, function() assert.truthy(header_output.child.rewrite) assert.truthy(header_output.child.access) assert.truthy(header_output.child.access.child.dns) -- upstream is resolved in access phase + assert.truthy(header_output.child.access.child.router) -- router is executed in access phase assert.truthy(header_output.child.response) assert.truthy(log_output.child.rewrite) assert.truthy(log_output.child.access) assert.truthy(log_output.child.access.child.dns) -- upstream is resolved in access phase + assert.truthy(header_output.child.access.child.router) -- router is executed in access phase assert.truthy(log_output.child.body_filter) assert.truthy(log_output.child.log) From e613aa1cbf3bde1ee0676c3c5be65221c3fdd54e Mon Sep 17 00:00:00 2001 From: Chrono Date: Tue, 27 Feb 2024 10:31:28 +0800 Subject: [PATCH 67/91] refactor(db/schema): do not generate validator of router expression for non-traditional flavors (#12430) --- kong/db/schema/entities/routes.lua | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/kong/db/schema/entities/routes.lua b/kong/db/schema/entities/routes.lua index c0ec191cc33..148a2b8aab2 100644 --- a/kong/db/schema/entities/routes.lua +++ b/kong/db/schema/entities/routes.lua @@ -1,24 +1,27 @@ local typedefs = require("kong.db.schema.typedefs") -local router = require("resty.router.router") local deprecation = require("kong.deprecation") +local kong_router_flavor = kong and kong.configuration and kong.configuration.router_flavor + +-- works with both `traditional_compatible` and `expressions` routes local validate_route -do +if kong_router_flavor ~= "traditional" then local ipairs = ipairs local tonumber = tonumber local re_match = ngx.re.match + local router = require("resty.router.router") local get_schema = require("kong.router.atc").schema - local get_expression = require("kong.router.compat").get_expression - local transform_expression = require("kong.router.expressions").transform_expression + local get_expression = kong_router_flavor == "traditional_compatible" and + require("kong.router.compat").get_expression or + require("kong.router.expressions").transform_expression local HTTP_PATH_SEGMENTS_PREFIX = "http.path.segments." 
local HTTP_PATH_SEGMENTS_SUFFIX_REG = [[^(0|[1-9]\d*)(_([1-9]\d*))?$]] - -- works with both `traditional_compatiable` and `expressions` routes` validate_route = function(entity) local schema = get_schema(entity.protocols) - local exp = transform_expression(entity) or get_expression(entity) + local exp = get_expression(entity) local fields, err = router.validate(schema, exp) if not fields then @@ -35,14 +38,12 @@ do return nil, "Router Expression failed validation: " .. "illformed http.path.segments.* field" end - end - end + end -- if f:find + end -- for fields return true end -end - -local kong_router_flavor = kong and kong.configuration and kong.configuration.router_flavor +end -- if kong_router_flavor ~= "traditional" if kong_router_flavor == "expressions" then return { From c5ed954e4ce517956173eaa860b6344d2a6cd06c Mon Sep 17 00:00:00 2001 From: Xiaochen Wang Date: Tue, 27 Feb 2024 13:20:51 +0800 Subject: [PATCH 68/91] fix(conf): fix the default value of upstream_keepalive_max_requests (#12643) This commit fixes the discrepancy between the default value of upstream_keepalive_max_requests in the Kong.conf comments and the actual value in kong/templates/kong_defaults.lua. --- .../fix-default-value-of-upstream-keepalive-max-requests.yml | 5 +++++ kong.conf.default | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/kong/fix-default-value-of-upstream-keepalive-max-requests.yml diff --git a/changelog/unreleased/kong/fix-default-value-of-upstream-keepalive-max-requests.yml b/changelog/unreleased/kong/fix-default-value-of-upstream-keepalive-max-requests.yml new file mode 100644 index 00000000000..45eedd995d6 --- /dev/null +++ b/changelog/unreleased/kong/fix-default-value-of-upstream-keepalive-max-requests.yml @@ -0,0 +1,5 @@ +message: | + Fixed default value in kong.conf.default documentation from 1000 to 10000 + for upstream_keepalive_max_requests option. +type: bugfix +scope: Configuration diff --git a/kong.conf.default b/kong.conf.default index 77b9a28788f..0f2b7d22a5b 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -1020,7 +1020,7 @@ # each upstream request to open a new # connection. -#upstream_keepalive_max_requests = 1000 # Sets the default maximum number of +#upstream_keepalive_max_requests = 10000 # Sets the default maximum number of # requests than can be proxied upstream # through one keepalive connection. # After the maximum number of requests From 518d1fffd277ecfb4da21dd5e58962959b733ffa Mon Sep 17 00:00:00 2001 From: Chrono Date: Tue, 27 Feb 2024 13:52:01 +0800 Subject: [PATCH 69/91] refactor(router/atc): remove tail calls to avoid NYIs (#12476) NYI (Not Yet Implemented) might impact the performance of the LuaJIT. 
Co-authored-by: Qi --- kong/router/atc.lua | 6 +++++- kong/router/compat.lua | 17 ++++++++++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/kong/router/atc.lua b/kong/router/atc.lua index 225a9eaaaa8..a067a914e29 100644 --- a/kong/router/atc.lua +++ b/kong/router/atc.lua @@ -130,7 +130,11 @@ local function gen_for_field(name, op, vals, val_transform) end -- consume the whole buffer - return values_buf:put(")"):get() + -- returns a local variable instead of using a tail call + -- to avoid NYI + local str = values_buf:put(")"):get() + + return str end diff --git a/kong/router/compat.lua b/kong/router/compat.lua index e09f84966de..df4285f21db 100644 --- a/kong/router/compat.lua +++ b/kong/router/compat.lua @@ -137,7 +137,11 @@ local function gen_for_nets(ip_field, port_field, vals) ::continue:: end -- for - return nets_buf:put(")"):get() + local str = nets_buf:put(")"):get() + + -- returns a local variable instead of using a tail call + -- to avoid NYI + return str end @@ -188,7 +192,10 @@ local function get_expression(route) end if src_gen or dst_gen then - return expr_buf:get() + -- returns a local variable instead of using a tail call + -- to avoid NYI + local str = expr_buf:get() + return str end end @@ -272,7 +279,11 @@ local function get_expression(route) expression_append(expr_buf, LOGICAL_AND, headers_buf:get()) end - return expr_buf:get() + local str = expr_buf:get() + + -- returns a local variable instead of using a tail call + -- to avoid NYI + return str end From 365a0e53dfa6248971a62be2c88ef6a7123e2a95 Mon Sep 17 00:00:00 2001 From: Chrono Date: Tue, 27 Feb 2024 14:58:10 +0800 Subject: [PATCH 70/91] refactor(router/atc): simplify the code of atc router schema (#12395) --- kong/router/atc.lua | 123 ++++++++++++++++++++++++-------------------- 1 file changed, 66 insertions(+), 57 deletions(-) diff --git a/kong/router/atc.lua b/kong/router/atc.lua index a067a914e29..b186a1b29bb 100644 --- a/kong/router/atc.lua +++ b/kong/router/atc.lua @@ -3,12 +3,8 @@ local _MT = { __index = _M, } local buffer = require("string.buffer") -local schema = require("resty.router.schema") -local context = require("resty.router.context") -local router = require("resty.router.router") local lrucache = require("resty.lrucache") local tb_new = require("table.new") -local fields = require("kong.router.fields") local utils = require("kong.router.utils") local rat = require("kong.tools.request_aware_table") local yield = require("kong.tools.yield").yield @@ -52,10 +48,15 @@ local is_http = ngx.config.subsystem == "http" local values_buf = buffer.new(64) -local CACHED_SCHEMA -local HTTP_SCHEMA -local STREAM_SCHEMA +local get_atc_context +local get_atc_router +local get_atc_fields do + local schema = require("resty.router.schema") + local context = require("resty.router.context") + local router = require("resty.router.router") + local fields = require("kong.router.fields") + local function generate_schema(fields) local s = schema.new() @@ -69,11 +70,62 @@ do end -- used by validation - HTTP_SCHEMA = generate_schema(fields.HTTP_FIELDS) - STREAM_SCHEMA = generate_schema(fields.STREAM_FIELDS) + local HTTP_SCHEMA = generate_schema(fields.HTTP_FIELDS) + local STREAM_SCHEMA = generate_schema(fields.STREAM_FIELDS) -- used by running router - CACHED_SCHEMA = is_http and HTTP_SCHEMA or STREAM_SCHEMA + local CACHED_SCHEMA = is_http and HTTP_SCHEMA or STREAM_SCHEMA + + get_atc_context = function() + return context.new(CACHED_SCHEMA) + end + + get_atc_router = function(routes_n) + return 
router.new(CACHED_SCHEMA, routes_n) + end + + get_atc_fields = function(inst) + return fields.new(inst:get_fields()) + end + + local protocol_to_schema = { + http = HTTP_SCHEMA, + https = HTTP_SCHEMA, + grpc = HTTP_SCHEMA, + grpcs = HTTP_SCHEMA, + + tcp = STREAM_SCHEMA, + udp = STREAM_SCHEMA, + tls = STREAM_SCHEMA, + + tls_passthrough = STREAM_SCHEMA, + } + + -- for db schema validation + function _M.schema(protocols) + return assert(protocol_to_schema[protocols[1]]) + end + + -- for unit testing + function _M._set_ngx(mock_ngx) + if type(mock_ngx) ~= "table" then + return + end + + if mock_ngx.header then + header = mock_ngx.header + end + + if mock_ngx.var then + var = mock_ngx.var + end + + if mock_ngx.log then + ngx_log = mock_ngx.log + end + + fields._set_ngx(mock_ngx) + end end @@ -166,7 +218,7 @@ local function new_from_scratch(routes, get_exp_and_priority) local routes_n = #routes - local inst = router.new(CACHED_SCHEMA, routes_n) + local inst = get_atc_router(routes_n) local routes_t = tb_new(0, routes_n) local services_t = tb_new(0, routes_n) @@ -200,8 +252,8 @@ local function new_from_scratch(routes, get_exp_and_priority) end return setmetatable({ - context = context.new(CACHED_SCHEMA), - fields = fields.new(inst:get_fields()), + context = get_atc_context(), + fields = get_atc_fields(inst), router = inst, routes = routes_t, services = services_t, @@ -286,7 +338,7 @@ local function new_from_previous(routes, get_exp_and_priority, old_router) yield(true, phase) end - old_router.fields = fields.new(inst:get_fields()) + old_router.fields = get_atc_fields(inst) old_router.updated_at = new_updated_at old_router.rebuilding = false @@ -659,49 +711,6 @@ end end -- if is_http -function _M._set_ngx(mock_ngx) - if type(mock_ngx) ~= "table" then - return - end - - if mock_ngx.header then - header = mock_ngx.header - end - - if mock_ngx.var then - var = mock_ngx.var - end - - if mock_ngx.log then - ngx_log = mock_ngx.log - end - - -- unit testing - fields._set_ngx(mock_ngx) -end - - -do - local protocol_to_schema = { - http = HTTP_SCHEMA, - https = HTTP_SCHEMA, - grpc = HTTP_SCHEMA, - grpcs = HTTP_SCHEMA, - - tcp = STREAM_SCHEMA, - udp = STREAM_SCHEMA, - tls = STREAM_SCHEMA, - - tls_passthrough = STREAM_SCHEMA, - } - - -- for db schema validation - function _M.schema(protocols) - return assert(protocol_to_schema[protocols[1]]) - end -end - - _M.LOGICAL_OR = LOGICAL_OR _M.LOGICAL_AND = LOGICAL_AND From af2176148ab7f9f66a4f189bc066d73166e38f52 Mon Sep 17 00:00:00 2001 From: chronolaw Date: Thu, 25 Jan 2024 10:47:14 +0800 Subject: [PATCH 71/91] style lint --- kong/router/fields.lua | 190 ++++++++++++++++++++--------------------- 1 file changed, 95 insertions(+), 95 deletions(-) diff --git a/kong/router/fields.lua b/kong/router/fields.lua index d975ce465c8..e82893f4dd7 100644 --- a/kong/router/fields.lua +++ b/kong/router/fields.lua @@ -56,53 +56,53 @@ local STREAM_FIELDS = { local FIELDS_FUNCS = { - -- http.* + -- http.* - ["http.method"] = - function(params) - if not params.method then - params.method = get_method() - end + ["http.method"] = + function(params) + if not params.method then + params.method = get_method() + end - return params.method - end, + return params.method + end, - ["http.path"] = - function(params) - return params.uri - end, + ["http.path"] = + function(params) + return params.uri + end, - ["http.host"] = - function(params) - return params.host - end, + ["http.host"] = + function(params) + return params.host + end, - -- net.* + -- net.* - ["net.src.ip"] = - function(params) 
- if not params.src_ip then - params.src_ip = var.remote_addr - end + ["net.src.ip"] = + function(params) + if not params.src_ip then + params.src_ip = var.remote_addr + end - return params.src_ip - end, + return params.src_ip + end, - ["net.src.port"] = - function(params) - if not params.src_port then - params.src_port = tonumber(var.remote_port, 10) - end + ["net.src.port"] = + function(params) + if not params.src_port then + params.src_port = tonumber(var.remote_port, 10) + end - return params.src_port - end, + return params.src_port + end, - -- below are atc context only + -- below are atc context only - ["net.protocol"] = - function(params) - return params.scheme - end, + ["net.protocol"] = + function(params) + return params.scheme + end, } @@ -110,90 +110,90 @@ local is_http = ngx.config.subsystem == "http" if is_http then - -- tls.* - - FIELDS_FUNCS["tls.sni"] = - function(params) - if not params.sni then - params.sni = server_name() - end + -- tls.* - return params.sni + FIELDS_FUNCS["tls.sni"] = + function(params) + if not params.sni then + params.sni = server_name() end - -- net.* + return params.sni + end - FIELDS_FUNCS["net.dst.ip"] = - function(params) - if not params.dst_ip then - params.dst_ip = var.server_addr - end + -- net.* - return params.dst_ip + FIELDS_FUNCS["net.dst.ip"] = + function(params) + if not params.dst_ip then + params.dst_ip = var.server_addr end - FIELDS_FUNCS["net.dst.port"] = - function(params, ctx) - if params.port then - return params.port - end + return params.dst_ip + end - if not params.dst_port then - params.dst_port = tonumber((ctx or ngx.ctx).host_port, 10) or - tonumber(var.server_port, 10) - end + FIELDS_FUNCS["net.dst.port"] = + function(params, ctx) + if params.port then + return params.port + end - return params.dst_port + if not params.dst_port then + params.dst_port = tonumber((ctx or ngx.ctx).host_port, 10) or + tonumber(var.server_port, 10) end + return params.dst_port + end + else -- stream - -- tls.* - -- error value for non-TLS connections ignored intentionally - -- fallback to preread SNI if current connection doesn't terminate TLS + -- tls.* + -- error value for non-TLS connections ignored intentionally + -- fallback to preread SNI if current connection doesn't terminate TLS - FIELDS_FUNCS["tls.sni"] = - function(params) - if not params.sni then - params.sni = server_name() or var.ssl_preread_server_name - end - - return params.sni + FIELDS_FUNCS["tls.sni"] = + function(params) + if not params.sni then + params.sni = server_name() or var.ssl_preread_server_name end - -- net.* - -- when proxying TLS request in second layer or doing TLS passthrough - -- rewrite the dst_ip, port back to what specified in proxy_protocol + return params.sni + end - FIELDS_FUNCS["net.dst.ip"] = - function(params) - if not params.dst_ip then - if var.kong_tls_passthrough_block == "1" or var.ssl_protocol then - params.dst_ip = var.proxy_protocol_server_addr + -- net.* + -- when proxying TLS request in second layer or doing TLS passthrough + -- rewrite the dst_ip, port back to what specified in proxy_protocol - else - params.dst_ip = var.server_addr - end - end + FIELDS_FUNCS["net.dst.ip"] = + function(params) + if not params.dst_ip then + if var.kong_tls_passthrough_block == "1" or var.ssl_protocol then + params.dst_ip = var.proxy_protocol_server_addr - return params.dst_ip + else + params.dst_ip = var.server_addr + end end - FIELDS_FUNCS["net.dst.port"] = - function(params, ctx) - if not params.dst_port then - if var.kong_tls_passthrough_block == "1" or 
var.ssl_protocol then - params.dst_port = tonumber(var.proxy_protocol_server_port) + return params.dst_ip + end - else - params.dst_port = tonumber((ctx or ngx.ctx).host_port, 10) or - tonumber(var.server_port, 10) - end - end + FIELDS_FUNCS["net.dst.port"] = + function(params, ctx) + if not params.dst_port then + if var.kong_tls_passthrough_block == "1" or var.ssl_protocol then + params.dst_port = tonumber(var.proxy_protocol_server_port) - return params.dst_port + else + params.dst_port = tonumber((ctx or ngx.ctx).host_port, 10) or + tonumber(var.server_port, 10) + end end + return params.dst_port + end + end -- is_http From 11f6b5609d699a0bcfea81476a026638ddac8f33 Mon Sep 17 00:00:00 2001 From: chronolaw Date: Thu, 25 Jan 2024 10:49:31 +0800 Subject: [PATCH 72/91] clean fill_atc_context --- kong/router/fields.lua | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/kong/router/fields.lua b/kong/router/fields.lua index e82893f4dd7..126bbce671f 100644 --- a/kong/router/fields.lua +++ b/kong/router/fields.lua @@ -404,30 +404,26 @@ end local function visit_for_context(field, value, ctx) - local prefix = field:sub(1, PREFIX_LEN) - - if prefix == HTTP_HEADERS_PREFIX or prefix == HTTP_QUERIES_PREFIX then - local v_type = type(value) - - -- multiple values for a single query parameter, like /?foo=bar&foo=baz - if v_type == "table" then - for _, v in ipairs(value) do - local res, err = ctx:add_value(field, v) - if not res then - return nil, err - end + local v_type = type(value) + + -- multiple values for a single header/query parameter, like /?foo=bar&foo=baz + if v_type == "table" then + for _, v in ipairs(value) do + local res, err = ctx:add_value(field, v) + if not res then + return nil, err end - - return true - end -- if v_type - - -- the query parameter has only one value, like /?foo=bar - -- the query parameter has no value, like /?foo, - -- get_uri_arg will get a boolean `true` - -- we think it is equivalent to /?foo= - if v_type == "boolean" then - value = "" end + + return true + end -- if v_type + + -- the header/query parameter has only one value, like /?foo=bar + -- the query parameter has no value, like /?foo, + -- get_uri_arg will get a boolean `true` + -- we think it is equivalent to /?foo= + if v_type == "boolean" then + value = "" end return ctx:add_value(field, value) From 6c9f44ab9351c35c25c517925cd2289cd5bd9ab2 Mon Sep 17 00:00:00 2001 From: Guilherme Salazar Date: Tue, 27 Feb 2024 08:27:14 -0300 Subject: [PATCH 73/91] chore(ci): add commit-lint action Enforce commit message format. --- .github/workflows/commitlint.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/commitlint.yml diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml new file mode 100644 index 00000000000..0901434386e --- /dev/null +++ b/.github/workflows/commitlint.yml @@ -0,0 +1,12 @@ +name: commit-lint + +on: [push, pull_request] + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - uses: ahmadnassri/action-commit-lint@v2 + with: + config: conventional From 70ac29d08011d3a76aafc976e04b26d133b29dfb Mon Sep 17 00:00:00 2001 From: Michael Martin Date: Wed, 24 Jan 2024 10:46:42 -0800 Subject: [PATCH 74/91] fix(wasm): use singleton kong.dns client for wasm resolver bridge The original code was attempting to instantiate its own DNS client, which is really not possible given the singleton nature of the module. 
The correct and more maintainable behavior here is to explicitly reuse the global client instance at `kong.dns`. --- kong/runloop/wasm.lua | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/kong/runloop/wasm.lua b/kong/runloop/wasm.lua index 9bb697cdda1..e745b7f2cfb 100644 --- a/kong/runloop/wasm.lua +++ b/kong/runloop/wasm.lua @@ -32,7 +32,6 @@ local _M = { local utils = require "kong.tools.utils" -local dns = require "kong.tools.dns" local reports = require "kong.reports" local clear_tab = require "table.clear" local cjson = require "cjson.safe" @@ -835,9 +834,6 @@ end local function enable(kong_config) set_available_filters(kong_config.wasm_modules_parsed) - -- setup a DNS client for ngx_wasm_module - _G.dns_client = _G.dns_client or dns(kong_config) - proxy_wasm = proxy_wasm or require "resty.wasmx.proxy_wasm" register_property_handlers() @@ -889,6 +885,12 @@ function _M.init_worker() return true end + _G.dns_client = kong and kong.dns + + if not _G.dns_client then + return nil, "global kong.dns client is not initialized" + end + local ok, err = update_in_place() if not ok then return nil, err From 8acdb2939aa891608ac0244fecf2193080eefffe Mon Sep 17 00:00:00 2001 From: Michael Martin Date: Mon, 29 Jan 2024 14:05:48 -0800 Subject: [PATCH 75/91] chore(wasm): skip some initialization steps in CLI mode --- kong/runloop/wasm.lua | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/kong/runloop/wasm.lua b/kong/runloop/wasm.lua index e745b7f2cfb..8ea57e2042b 100644 --- a/kong/runloop/wasm.lua +++ b/kong/runloop/wasm.lua @@ -834,9 +834,11 @@ end local function enable(kong_config) set_available_filters(kong_config.wasm_modules_parsed) - proxy_wasm = proxy_wasm or require "resty.wasmx.proxy_wasm" + if not ngx.IS_CLI then + proxy_wasm = proxy_wasm or require "resty.wasmx.proxy_wasm" - register_property_handlers() + register_property_handlers() + end ENABLED = true STATUS = STATUS_ENABLED @@ -885,10 +887,12 @@ function _M.init_worker() return true end - _G.dns_client = kong and kong.dns + if not ngx.IS_CLI then + _G.dns_client = kong and kong.dns - if not _G.dns_client then - return nil, "global kong.dns client is not initialized" + if not _G.dns_client then + return nil, "global kong.dns client is not initialized" + end end local ok, err = update_in_place() From 6ead30227b1cf4927bd660698278452656a404a6 Mon Sep 17 00:00:00 2001 From: Xiaoyan Rao <270668624@qq.com> Date: Wed, 28 Feb 2024 02:05:53 +0800 Subject: [PATCH 76/91] fix(build): bazel install root not found when build_name is not kong-dev. 
(#12641) --- scripts/build-wasm-test-filters.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build-wasm-test-filters.sh b/scripts/build-wasm-test-filters.sh index 07c5ce887be..504a4ed0240 100755 --- a/scripts/build-wasm-test-filters.sh +++ b/scripts/build-wasm-test-filters.sh @@ -22,7 +22,7 @@ set -euo pipefail readonly BUILD_TARGET=wasm32-wasi readonly FIXTURE_PATH=${PWD}/spec/fixtures/proxy_wasm_filters -readonly INSTALL_ROOT=${PWD}/bazel-bin/build/kong-dev +readonly INSTALL_ROOT=${PWD}/bazel-bin/build/${BUILD_NAME:-kong-dev} readonly TARGET_DIR=${INSTALL_ROOT}/wasm-cargo-target readonly KONG_TEST_USER_CARGO_DISABLED=${KONG_TEST_USER_CARGO_DISABLED:-0} From dcf871146a8e2f89069f4c22b2abf556c8918f0e Mon Sep 17 00:00:00 2001 From: Chrono Date: Thu, 29 Feb 2024 10:47:08 +0800 Subject: [PATCH 77/91] refactor(router/atc): simplify searching for field accessor function (#12664) --- kong/router/fields.lua | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/kong/router/fields.lua b/kong/router/fields.lua index 126bbce671f..4294e84b760 100644 --- a/kong/router/fields.lua +++ b/kong/router/fields.lua @@ -199,6 +199,11 @@ end -- is_http -- stream subsystem needs not to generate func local function get_field_accessor(funcs, field) + local f = FIELDS_FUNCS[field] + if f then + return f + end + error("unknown router matching schema field: " .. field) end @@ -259,7 +264,7 @@ if is_http then get_field_accessor = function(funcs, field) - local f = funcs[field] + local f = FIELDS_FUNCS[field] or funcs[field] if f then return f end @@ -447,8 +452,7 @@ end function _M:get_value(field, params, ctx) - local func = FIELDS_FUNCS[field] or - get_field_accessor(self.funcs, field) + local func = get_field_accessor(self.funcs, field) return func(params, ctx) end From 094ac13d562af3f1359ddc8c833a335b9c926c25 Mon Sep 17 00:00:00 2001 From: Guilherme Salazar Date: Wed, 28 Feb 2024 10:14:00 -0300 Subject: [PATCH 78/91] Revert "chore(ci): add commit-lint action" This reverts commit 6c9f44ab9351c35c25c517925cd2289cd5bd9ab2. --- .github/workflows/commitlint.yml | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 .github/workflows/commitlint.yml diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml deleted file mode 100644 index 0901434386e..00000000000 --- a/.github/workflows/commitlint.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: commit-lint - -on: [push, pull_request] - -jobs: - lint: - runs-on: ubuntu-latest - - steps: - - uses: ahmadnassri/action-commit-lint@v2 - with: - config: conventional From 55358dfc675ed499afd72154e14c15b2db8399d5 Mon Sep 17 00:00:00 2001 From: Jun Ouyang Date: Thu, 29 Feb 2024 16:05:24 +0800 Subject: [PATCH 79/91] fix(build): revert OpenResty `ngx.req.read_body()` HTTP/2 chunked encoding limitation (#12658) Cherry picked from https://github.com/openresty/lua-nginx-module/pull/2286. It was acknowledged by OpenResty as a mistaken breaking change, and we should revert it. 
FTI-5766 FTI-5795 --- ..._revert_req_body_hardcode_limitation.patch | 320 ++++++++++++++++++ .../kong/revert-req-body-limitation-patch.yml | 3 + t/04-patch/02-ngx-read-body-block.t | 49 +++ 3 files changed, 372 insertions(+) create mode 100644 build/openresty/patches/nginx-1.25.3_03-http_revert_req_body_hardcode_limitation.patch create mode 100644 changelog/unreleased/kong/revert-req-body-limitation-patch.yml create mode 100644 t/04-patch/02-ngx-read-body-block.t diff --git a/build/openresty/patches/nginx-1.25.3_03-http_revert_req_body_hardcode_limitation.patch b/build/openresty/patches/nginx-1.25.3_03-http_revert_req_body_hardcode_limitation.patch new file mode 100644 index 00000000000..00a38352402 --- /dev/null +++ b/build/openresty/patches/nginx-1.25.3_03-http_revert_req_body_hardcode_limitation.patch @@ -0,0 +1,320 @@ +diff --git a/bundle/ngx_lua-0.10.26/README.markdown b/bundle/ngx_lua-0.10.26/README.markdown +index d6ec8c9..02eb9af 100644 +--- a/bundle/ngx_lua-0.10.26/README.markdown ++++ b/bundle/ngx_lua-0.10.26/README.markdown +@@ -2722,8 +2722,6 @@ lua_need_request_body + + **phase:** *depends on usage* + +-Due to the stream processing feature of HTTP/2 or HTTP/3, this configuration could potentially block the entire request. Therefore, this configuration is effective only when HTTP/2 or HTTP/3 requests send content-length header. For requests with versions lower than HTTP/2, this configuration can still be used without any problems. +- + Determines whether to force the request body data to be read before running rewrite/access/content_by_lua* or not. The Nginx core does not read the client request body by default and if request body data is required, then this directive should be turned `on` or the [ngx.req.read_body](#ngxreqread_body) function should be called within the Lua code. + + To read the request body data within the [$request_body](http://nginx.org/en/docs/http/ngx_http_core_module.html#var_request_body) variable, +@@ -5426,8 +5424,6 @@ Reads the client request body synchronously without blocking the Nginx event loo + local args = ngx.req.get_post_args() + ``` + +-Due to the stream processing feature of HTTP/2 or HTTP/3, this api could potentially block the entire request. Therefore, this api is effective only when HTTP/2 or HTTP/3 requests send content-length header. For requests with versions lower than HTTP/2, this api can still be used without any problems. +- + If the request body is already read previously by turning on [lua_need_request_body](#lua_need_request_body) or by using other modules, then this function does not run and returns immediately. + + If the request body has already been explicitly discarded, either by the [ngx.req.discard_body](#ngxreqdiscard_body) function or other modules, this function does not run and returns immediately. +@@ -5643,7 +5639,7 @@ Returns a read-only cosocket object that wraps the downstream connection. Only [ + + In case of error, `nil` will be returned as well as a string describing the error. + +-Due to the streaming nature of HTTP2 and HTTP3, this API cannot be used when the downstream connection is HTTP2 and HTTP3. ++**Note:** This method will block while waiting for client request body to be fully received. Block time depends on the [client_body_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout) directive and maximum body size specified by the [client_max_body_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) directive. 
If read timeout occurs or client body size exceeds the defined limit, this function will not return and `408 Request Time-out` or `413 Request Entity Too Large` response will be returned to the client instead. + + The socket object returned by this method is usually used to read the current request's body in a streaming fashion. Do not turn on the [lua_need_request_body](#lua_need_request_body) directive, and do not mix this call with [ngx.req.read_body](#ngxreqread_body) and [ngx.req.discard_body](#ngxreqdiscard_body). + +diff --git a/bundle/ngx_lua-0.10.26/doc/HttpLuaModule.wiki b/bundle/ngx_lua-0.10.26/doc/HttpLuaModule.wiki +index 305626c..0db9dd5 100644 +--- a/bundle/ngx_lua-0.10.26/doc/HttpLuaModule.wiki ++++ b/bundle/ngx_lua-0.10.26/doc/HttpLuaModule.wiki +@@ -4741,8 +4741,7 @@ Returns a read-only cosocket object that wraps the downstream connection. Only [ + + In case of error, nil will be returned as well as a string describing the error. + +-Due to the streaming nature of HTTP2 and HTTP3, this API cannot be used when the downstream connection is HTTP2 and HTTP3. +- ++'''Note:''' This method will block while waiting for client request body to be fully received. Block time depends on the [http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout client_body_timeout] directive and maximum body size specified by the [http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size client_max_body_size] directive. If read timeout occurs or client body size exceeds the defined limit, this function will not return and 408 Request Time-out or 413 Request Entity Too Large response will be returned to the client instead. + The socket object returned by this method is usually used to read the current request's body in a streaming fashion. Do not turn on the [[#lua_need_request_body|lua_need_request_body]] directive, and do not mix this call with [[#ngx.req.read_body|ngx.req.read_body]] and [[#ngx.req.discard_body|ngx.req.discard_body]]. + + If any request body data has been pre-read into the Nginx core request header buffer, the resulting cosocket object will take care of this to avoid potential data loss resulting from such pre-reading. 
+diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_accessby.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_accessby.c +index 2bf40aa..d40eab1 100644 +--- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_accessby.c ++++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_accessby.c +@@ -137,26 +137,6 @@ ngx_http_lua_access_handler(ngx_http_request_t *r) + } + + if (llcf->force_read_body && !ctx->read_body_done) { +- +-#if (NGX_HTTP_V2) +- if (r->main->stream && r->headers_in.content_length_n < 0) { +- ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, +- "disable lua_need_request_body, since " +- "http2 read_body may break http2 stream process"); +- goto done; +- } +-#endif +- +-#if (NGX_HTTP_V3) +- if (r->http_version == NGX_HTTP_VERSION_30 +- && r->headers_in.content_length_n < 0) +- { +- ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, +- "disable lua_need_request_body, since " +- "http2 read_body may break http2 stream process"); +- goto done; +- } +-#endif + r->request_body_in_single_buf = 1; + r->request_body_in_persistent_file = 1; + r->request_body_in_clean_file = 1; +@@ -174,12 +154,6 @@ ngx_http_lua_access_handler(ngx_http_request_t *r) + } + } + +-#if defined(NGX_HTTP_V3) || defined(NGX_HTTP_V2) +- +-done: +- +-#endif +- + dd("calling access handler"); + return llcf->access_handler(r); + } +diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_contentby.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_contentby.c +index 2014d52..5e2ae55 100644 +--- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_contentby.c ++++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_contentby.c +@@ -196,26 +196,6 @@ ngx_http_lua_content_handler(ngx_http_request_t *r) + } + + if (llcf->force_read_body && !ctx->read_body_done) { +- +-#if (NGX_HTTP_V2) +- if (r->main->stream && r->headers_in.content_length_n < 0) { +- ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, +- "disable lua_need_request_body, since " +- "http2 read_body may break http2 stream process"); +- goto done; +- } +-#endif +- +-#if (NGX_HTTP_V3) +- if (r->http_version == NGX_HTTP_VERSION_30 +- && r->headers_in.content_length_n < 0) +- { +- ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, +- "disable lua_need_request_body, since " +- "http2 read_body may break http2 stream process"); +- goto done; +- } +-#endif + r->request_body_in_single_buf = 1; + r->request_body_in_persistent_file = 1; + r->request_body_in_clean_file = 1; +@@ -234,12 +214,6 @@ ngx_http_lua_content_handler(ngx_http_request_t *r) + } + } + +-#if defined(NGX_HTTP_V3) || defined(NGX_HTTP_V2) +- +-done: +- +-#endif +- + dd("setting entered"); + + ctx->entered_content_phase = 1; +diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_req_body.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_req_body.c +index 61ab999..5d69735 100644 +--- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_req_body.c ++++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_req_body.c +@@ -85,23 +85,6 @@ ngx_http_lua_ngx_req_read_body(lua_State *L) + return luaL_error(L, "request object not found"); + } + +-/* http2 read body may break http2 stream process */ +-#if (NGX_HTTP_V2) +- if (r->main->stream && r->headers_in.content_length_n < 0) { +- return luaL_error(L, "http2 requests are not supported" +- " without content-length header"); +- } +-#endif +- +-#if (NGX_HTTP_V3) +- if (r->http_version == NGX_HTTP_VERSION_30 +- && r->headers_in.content_length_n < 0) +- { +- return luaL_error(L, "http3 requests are not supported" +- " without content-length header"); +- } +-#endif +- + r->request_body_in_single_buf = 1; + r->request_body_in_persistent_file = 
1; + r->request_body_in_clean_file = 1; +@@ -349,23 +332,6 @@ ngx_http_lua_ngx_req_get_body_file(lua_State *L) + return luaL_error(L, "request object not found"); + } + +-/* http2 read body may break http2 stream process */ +-#if (NGX_HTTP_V2) +- if (r->main->stream && r->headers_in.content_length_n < 0) { +- return luaL_error(L, "http2 requests are not supported" +- " without content-length header"); +- } +-#endif +- +-#if (NGX_HTTP_V3) +- if (r->http_version == NGX_HTTP_VERSION_30 +- && r->headers_in.content_length_n < 0) +- { +- return luaL_error(L, "http3 requests are not supported" +- " without content-length header"); +- } +-#endif +- + ngx_http_lua_check_fake_request(L, r); + + if (r->request_body == NULL || r->request_body->temp_file == NULL) { +diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_rewriteby.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_rewriteby.c +index c56bba5..4109f28 100644 +--- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_rewriteby.c ++++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_rewriteby.c +@@ -140,12 +140,7 @@ ngx_http_lua_rewrite_handler(ngx_http_request_t *r) + return NGX_DONE; + } + +-/* http2 read body may break http2 stream process */ +-#if (NGX_HTTP_V2) +- if (llcf->force_read_body && !ctx->read_body_done && !r->main->stream) { +-#else + if (llcf->force_read_body && !ctx->read_body_done) { +-#endif + r->request_body_in_single_buf = 1; + r->request_body_in_persistent_file = 1; + r->request_body_in_clean_file = 1; +diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_server_rewriteby.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_server_rewriteby.c +index 997262e..be86069 100644 +--- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_server_rewriteby.c ++++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_server_rewriteby.c +@@ -102,13 +102,8 @@ ngx_http_lua_server_rewrite_handler(ngx_http_request_t *r) + return NGX_DONE; + } + +-/* TODO: lscf do not have force_read_body +- * http2 read body may break http2 stream process */ +-#if (NGX_HTTP_V2) +- if (llcf->force_read_body && !ctx->read_body_done && !r->main->stream) { +-#else ++ /* TODO: lscf do not have force_read_body */ + if (llcf->force_read_body && !ctx->read_body_done) { +-#endif + r->request_body_in_single_buf = 1; + r->request_body_in_persistent_file = 1; + r->request_body_in_clean_file = 1; +diff --git a/bundle/ngx_lua-0.10.26/t/023-rewrite/request_body.t b/bundle/ngx_lua-0.10.26/t/023-rewrite/request_body.t +index 32c02e1..b867d3a 100644 +--- a/bundle/ngx_lua-0.10.26/t/023-rewrite/request_body.t ++++ b/bundle/ngx_lua-0.10.26/t/023-rewrite/request_body.t +@@ -170,26 +170,3 @@ Expect: 100-Continue + http finalize request: 500, "/echo_body?" a:1, c:2 + http finalize request: 500, "/echo_body?" 
a:1, c:0 + --- log_level: debug +---- skip_eval: 4:$ENV{TEST_NGINX_USE_HTTP3} +- +- +- +-=== TEST 9: test HTTP2 reading request body was disabled +---- config +- location /echo_body { +- lua_need_request_body on; +- rewrite_by_lua_block { +- ngx.print(ngx.var.request_body or "nil") +- } +- content_by_lua 'ngx.exit(ngx.OK)'; +- } +---- http2 +---- request eval +-"POST /echo_body +-hello\x00\x01\x02 +-world\x03\x04\xff" +---- more_headers +-Content-Length: +---- response_body eval +-"nil" +---- no_error_log +diff --git a/bundle/ngx_lua-0.10.26/t/024-access/request_body.t b/bundle/ngx_lua-0.10.26/t/024-access/request_body.t +index 0aa12c8..fa03195 100644 +--- a/bundle/ngx_lua-0.10.26/t/024-access/request_body.t ++++ b/bundle/ngx_lua-0.10.26/t/024-access/request_body.t +@@ -170,26 +170,3 @@ Expect: 100-Continue + http finalize request: 500, "/echo_body?" a:1, c:2 + http finalize request: 500, "/echo_body?" a:1, c:0 + --- log_level: debug +---- skip_eval: 4:$ENV{TEST_NGINX_USE_HTTP3} +- +- +- +-=== TEST 9: test HTTP2 reading request body was disabled +---- config +- location /echo_body { +- lua_need_request_body on; +- access_by_lua_block { +- ngx.print(ngx.var.request_body or "nil") +- } +- content_by_lua 'ngx.exit(ngx.OK)'; +- } +---- http2 +---- request eval +-"POST /echo_body +-hello\x00\x01\x02 +-world\x03\x04\xff" +---- more_headers +-Content-Length: +---- response_body eval +-"nil" +---- no_error_log +diff --git a/bundle/ngx_lua-0.10.26/t/044-req-body.t b/bundle/ngx_lua-0.10.26/t/044-req-body.t +index f4509e1..da3a28b 100644 +--- a/bundle/ngx_lua-0.10.26/t/044-req-body.t ++++ b/bundle/ngx_lua-0.10.26/t/044-req-body.t +@@ -7,7 +7,7 @@ log_level('warn'); + + repeat_each(2); + +-plan tests => repeat_each() * (blocks() * 4 + 56); ++plan tests => repeat_each() * (blocks() * 4 + 58 ); + + #no_diff(); + no_long_string(); +@@ -1774,23 +1774,3 @@ content length: 5 + --- no_error_log + [error] + [alert] +---- skip_eval: 4:$ENV{TEST_NGINX_USE_HTTP3} +- +- +- +-=== TEST 53: HTTP2 read buffered body was discarded +---- config +- location = /test { +- content_by_lua_block { +- local err = pcall(ngx.req.read_body()) +- ngx.say(err) +- } +- } +---- http2 +---- request +-POST /test +-hello, world +---- more_headers +-Content-Length: +---- error_code: 500 +---- error_log: http2 requests are not supported without content-length header diff --git a/changelog/unreleased/kong/revert-req-body-limitation-patch.yml b/changelog/unreleased/kong/revert-req-body-limitation-patch.yml new file mode 100644 index 00000000000..55da8ff9197 --- /dev/null +++ b/changelog/unreleased/kong/revert-req-body-limitation-patch.yml @@ -0,0 +1,3 @@ +message: revert the hard-coded limitation of the ngx.read_body() API in OpenResty upstreams' new versions when downstream connections are in HTTP/2 or HTTP/3 stream modes. 
+type: bugfix +scope: Core diff --git a/t/04-patch/02-ngx-read-body-block.t b/t/04-patch/02-ngx-read-body-block.t new file mode 100644 index 00000000000..a086b125704 --- /dev/null +++ b/t/04-patch/02-ngx-read-body-block.t @@ -0,0 +1,49 @@ +# vim:set ft= ts=4 sw=4 et fdm=marker: + +use Test::Nginx::Socket 'no_plan'; + +repeat_each(2); + +run_tests(); + +__DATA__ + +=== TEST 1: ngx.req.read_body() should work for HTTP2 GET requests that doesn't carry the content-length header +--- config + location = /test { + content_by_lua_block { + local ok, err = pcall(ngx.req.read_body) + ngx.say(ok, " err: ", err) + } + } +--- http2 +--- request +GET /test +hello, world +--- more_headers +Content-Length: +--- response_body +true err: nil +--- no_error_log +[error] +[alert] + +=== TEST 2: ngx.req.read_body() should work for HTTP2 POST requests that doesn't carry the content-length header +--- config + location = /test { + content_by_lua_block { + local ok, err = pcall(ngx.req.read_body) + ngx.say(ok, " err: ", err) + } + } +--- http2 +--- request +POST /test +hello, world +--- more_headers +Content-Length: +--- response_body +true err: nil +--- no_error_log +[error] +[alert] \ No newline at end of file From 9d68f16a44a42dc1fa51c2a94627084d44af6404 Mon Sep 17 00:00:00 2001 From: Zachary Hu <6426329+outsinre@users.noreply.github.com> Date: Thu, 29 Feb 2024 16:59:50 +0800 Subject: [PATCH 80/91] chore(package): add tzdata to deb images (#12609) The tzdata package supports standard timezone database info. So users can set or show convenient timezone info. Fix #FTI-5698 --- build/dockerfiles/deb.Dockerfile | 1 + changelog/unreleased/kong/add_tzdata.yml | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog/unreleased/kong/add_tzdata.yml diff --git a/build/dockerfiles/deb.Dockerfile b/build/dockerfiles/deb.Dockerfile index a55b3706fcf..c25cbadd5d5 100644 --- a/build/dockerfiles/deb.Dockerfile +++ b/build/dockerfiles/deb.Dockerfile @@ -20,6 +20,7 @@ COPY ${KONG_ARTIFACT_PATH}${KONG_ARTIFACT} /tmp/kong.deb RUN apt-get update \ && apt-get -y upgrade \ && apt-get -y autoremove \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata \ && apt-get install -y --no-install-recommends /tmp/kong.deb \ && rm -rf /var/lib/apt/lists/* \ && rm -rf /tmp/kong.deb \ diff --git a/changelog/unreleased/kong/add_tzdata.yml b/changelog/unreleased/kong/add_tzdata.yml new file mode 100644 index 00000000000..91c8df9c2ad --- /dev/null +++ b/changelog/unreleased/kong/add_tzdata.yml @@ -0,0 +1,3 @@ +message: | + Add package `tzdata` to DEB Docker image for convenient timezone setting. 
+type: dependency From 51f11a7f2cdecf2258e6963941bd073693cc2291 Mon Sep 17 00:00:00 2001 From: tzssangglass Date: Thu, 29 Feb 2024 17:48:59 +0800 Subject: [PATCH 81/91] chore(cd): update file permission of kong.logrotate (#12629) origin file permission of kong.logrotate is 664, but the correct file permission is 644 Fix: https://konghq.atlassian.net/browse/FTI-5756 --------- Signed-off-by: tzssangglass --- build/package/nfpm.yaml | 3 +++ .../unreleased/kong/fix-file-permission-of-logrotate.yml | 3 +++ scripts/explain_manifest/explain.py | 8 +++++--- scripts/explain_manifest/suites.py | 2 ++ 4 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 changelog/unreleased/kong/fix-file-permission-of-logrotate.yml diff --git a/build/package/nfpm.yaml b/build/package/nfpm.yaml index 388b7d0be89..2e0bbf0c691 100644 --- a/build/package/nfpm.yaml +++ b/build/package/nfpm.yaml @@ -42,6 +42,9 @@ contents: dst: /lib/systemd/system/kong.service - src: build/package/kong.logrotate dst: /etc/kong/kong.logrotate + file_info: + mode: 0644 + scripts: postinstall: ./build/package/postinstall.sh replaces: diff --git a/changelog/unreleased/kong/fix-file-permission-of-logrotate.yml b/changelog/unreleased/kong/fix-file-permission-of-logrotate.yml new file mode 100644 index 00000000000..2fb24c9e2f5 --- /dev/null +++ b/changelog/unreleased/kong/fix-file-permission-of-logrotate.yml @@ -0,0 +1,3 @@ +message: update file permission of kong.logrotate to 644 +type: bugfix +scope: Core diff --git a/scripts/explain_manifest/explain.py b/scripts/explain_manifest/explain.py index d9f807b2dc2..1916401024e 100644 --- a/scripts/explain_manifest/explain.py +++ b/scripts/explain_manifest/explain.py @@ -64,12 +64,14 @@ def __init__(self, path, relpath): # use lstat to get the mode, uid, gid of the symlink itself self.mode = os.lstat(path).st_mode + # unix style mode + self.file_mode = '0' + oct(self.mode & 0o777)[2:] self.uid = os.lstat(path).st_uid self.gid = os.lstat(path).st_gid if not Path(path).is_symlink(): self.size = os.stat(path).st_size - + self._lazy_evaluate_attrs.update({ "binary_content": lambda: open(path, "rb").read(), "text_content": lambda: open(path, "rb").read().decode('utf-8'), @@ -129,7 +131,7 @@ def __init__(self, path, relpath): binary = lief.parse(path) if not binary: # not an ELF file, malformed, etc return - + self.arch = binary.header.machine_type.name for d in binary.dynamic_entries: @@ -152,7 +154,7 @@ def __init__(self, path, relpath): self.version_requirement[f.name] = [LooseVersion( a.name) for a in f.get_auxiliary_symbols()] self.version_requirement[f.name].sort() - + self._lazy_evaluate_attrs.update({ "exported_symbols": self.get_exported_symbols, "imported_symbols": self.get_imported_symbols, diff --git a/scripts/explain_manifest/suites.py b/scripts/explain_manifest/suites.py index 89fb06ecfe2..daed3029939 100644 --- a/scripts/explain_manifest/suites.py +++ b/scripts/explain_manifest/suites.py @@ -19,6 +19,8 @@ def common_suites(expect, libxcrypt_no_obsolete_api: bool = False): expect("/etc/kong/kong.logrotate", "includes logrotate config").exists() + expect("/etc/kong/kong.logrotate", "logrotate config should have 0644 permissions").file_mode.equals("0644") + expect("/usr/local/kong/include/openssl/**.h", "includes OpenSSL headers").exists() # binary correctness From d43159afcec9212a4e214b7568c4922a832c1651 Mon Sep 17 00:00:00 2001 From: Wangchong Zhou Date: Thu, 29 Feb 2024 19:14:52 +0800 Subject: [PATCH 82/91] chore(deps): bump lua-resty-openssl to 1.2.1 (#12665) --- 
changelog/unreleased/kong/bump-lua-resty-openssl.yml | 3 +++ kong-3.7.0-0.rockspec | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/kong/bump-lua-resty-openssl.yml diff --git a/changelog/unreleased/kong/bump-lua-resty-openssl.yml b/changelog/unreleased/kong/bump-lua-resty-openssl.yml new file mode 100644 index 00000000000..7e43d0456f7 --- /dev/null +++ b/changelog/unreleased/kong/bump-lua-resty-openssl.yml @@ -0,0 +1,3 @@ +message: "Bumped lua-resty-openssl to 1.2.1" +type: dependency +scope: Core diff --git a/kong-3.7.0-0.rockspec b/kong-3.7.0-0.rockspec index 61fa53a8f27..0d37a900f09 100644 --- a/kong-3.7.0-0.rockspec +++ b/kong-3.7.0-0.rockspec @@ -34,7 +34,7 @@ dependencies = { "lua-resty-healthcheck == 3.0.1", "lua-messagepack == 0.5.4", "lua-resty-aws == 1.3.6", - "lua-resty-openssl == 1.2.0", + "lua-resty-openssl == 1.2.1", "lua-resty-counter == 0.2.1", "lua-resty-ipmatcher == 0.6.1", "lua-resty-acme == 0.12.0", From 49c1ea021e3aaad8e083525bb65a3b421e4be44c Mon Sep 17 00:00:00 2001 From: Keery Nie Date: Tue, 5 Mar 2024 14:21:34 +0800 Subject: [PATCH 83/91] fix(cache): mlcache invalidation use separate cluster event channels to avoid useless invalidation (#12321) Currently, the kong_db_cache and kong_core_db_cache use the same invalidations channel named "invalidations" in the cluster event hook. In a traditional cluster(in which multiple Kong nodes use the same database and communicate with each other through cluster events), whenever an invalidation happens on a node A, any other single node X in the cluster will call invalidate_local on both kong_db_cache and kong_core_db_cache although the entity usually exists in only one cache, thus generates useless worker events. The PR tries to separate every kong.cache instance to use its own invalidations channel to avoid generating useless "invalidations" worker events in a traditional cluster. - Leave the existing channel "invalidation" only for kong.cache - Create a separate channel for the kong.core_cache(and other cache instances if any) * fix(cache): mlcache invalidation use split cluster event channels to avoid useless invalidation * docs(changelog): add changelog * fix(*): preserve old invalidate channel for kong_db_cache * docs(changelog): reword changelog * fix(*): stash invalidation channel in cache obj * docs(*): remove invalidate deprecate changelog * style(*): remove extra blank line --- ...che_invalidation_cluster_event_channel.yml | 4 ++++ kong/cache/init.lua | 15 +++++++++---- kong/global.lua | 21 ++++++++++--------- .../kong/plugins/invalidations/handler.lua | 4 ++++ 4 files changed, 30 insertions(+), 14 deletions(-) create mode 100644 changelog/unreleased/kong/separate_kong_cache_invalidation_cluster_event_channel.yml diff --git a/changelog/unreleased/kong/separate_kong_cache_invalidation_cluster_event_channel.yml b/changelog/unreleased/kong/separate_kong_cache_invalidation_cluster_event_channel.yml new file mode 100644 index 00000000000..ab0c68bc357 --- /dev/null +++ b/changelog/unreleased/kong/separate_kong_cache_invalidation_cluster_event_channel.yml @@ -0,0 +1,4 @@ +message: | + Each Kong cache instance now utilizes its own cluster event channel. This approach isolates cache invalidation events and reducing the generation of unnecessary worker events. 
+type: bugfix +scope: Core diff --git a/kong/cache/init.lua b/kong/cache/init.lua index dcf2d173c13..91b21c64a1a 100644 --- a/kong/cache/init.lua +++ b/kong/cache/init.lua @@ -86,6 +86,10 @@ function _M.new(opts) error("opts.resty_lock_opts must be a table", 2) end + if opts.invalidation_channel and type(opts.invalidation_channel) ~= "string" then + error("opts.invalidation_channel must be a string", 2) + end + local shm_name = opts.shm_name if not shared[shm_name] then log(ERR, "shared dictionary ", shm_name, " not found") @@ -131,6 +135,8 @@ function _M.new(opts) end local cluster_events = opts.cluster_events + local invalidation_channel = opts.invalidation_channel + or ("invalidations_" .. shm_name) local self = { cluster_events = cluster_events, mlcache = mlcache, @@ -138,10 +144,11 @@ function _M.new(opts) shm_name = shm_name, ttl = ttl, neg_ttl = neg_ttl, + invalidation_channel = invalidation_channel, } - local ok, err = cluster_events:subscribe("invalidations", function(key) - log(DEBUG, "received invalidate event from cluster for key: '", key, "'") + local ok, err = cluster_events:subscribe(self.invalidation_channel, function(key) + log(DEBUG, self.shm_name .. " received invalidate event from cluster for key: '", key, "'") self:invalidate_local(key) end) if not ok then @@ -230,7 +237,7 @@ function _M:invalidate_local(key) error("key must be a string", 2) end - log(DEBUG, "invalidating (local): '", key, "'") + log(DEBUG, self.shm_name, " invalidating (local): '", key, "'") local ok, err = self.mlcache:delete(key) if not ok then @@ -248,7 +255,7 @@ function _M:invalidate(key) log(DEBUG, "broadcasting (cluster) invalidation for key: '", key, "'") - local ok, err = self.cluster_events:broadcast("invalidations", key) + local ok, err = self.cluster_events:broadcast(self.invalidation_channel, key) if not ok then log(ERR, "failed to broadcast cached entity invalidation: ", err) end diff --git a/kong/global.lua b/kong/global.lua index ace19ae87fb..468f55bf821 100644 --- a/kong/global.lua +++ b/kong/global.lua @@ -249,16 +249,17 @@ function _GLOBAL.init_cache(kong_config, cluster_events, worker_events) end return kong_cache.new({ - shm_name = "kong_db_cache", - cluster_events = cluster_events, - worker_events = worker_events, - ttl = db_cache_ttl, - neg_ttl = db_cache_neg_ttl or db_cache_ttl, - resurrect_ttl = kong_config.resurrect_ttl, - page = page, - cache_pages = cache_pages, - resty_lock_opts = LOCK_OPTS, - lru_size = get_lru_size(kong_config), + shm_name = "kong_db_cache", + cluster_events = cluster_events, + worker_events = worker_events, + ttl = db_cache_ttl, + neg_ttl = db_cache_neg_ttl or db_cache_ttl, + resurrect_ttl = kong_config.resurrect_ttl, + page = page, + cache_pages = cache_pages, + resty_lock_opts = LOCK_OPTS, + lru_size = get_lru_size(kong_config), + invalidation_channel = "invalidations", }) end diff --git a/spec/fixtures/custom_plugins/kong/plugins/invalidations/handler.lua b/spec/fixtures/custom_plugins/kong/plugins/invalidations/handler.lua index 91ccfd67e5a..059a96b61c6 100644 --- a/spec/fixtures/custom_plugins/kong/plugins/invalidations/handler.lua +++ b/spec/fixtures/custom_plugins/kong/plugins/invalidations/handler.lua @@ -15,6 +15,10 @@ function Invalidations:init_worker() assert(kong.cluster_events:subscribe("invalidations", function(key) counts[key] = (counts[key] or 0) + 1 end)) + + assert(kong.cluster_events:subscribe("invalidations_kong_core_db_cache", function(key) + counts[key] = (counts[key] or 0) + 1 + end)) end From 
29285c3867038b66a57591ae09b640f92c35c4a0 Mon Sep 17 00:00:00 2001 From: Chrono Date: Tue, 5 Mar 2024 15:24:14 +0800 Subject: [PATCH 84/91] refactor(router/atc): simplify cache key calculation (#12481) --- kong/router/fields.lua | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/kong/router/fields.lua b/kong/router/fields.lua index 4294e84b760..40dd85609f7 100644 --- a/kong/router/fields.lua +++ b/kong/router/fields.lua @@ -8,7 +8,6 @@ local tonumber = tonumber local setmetatable = setmetatable local tb_sort = table.sort local tb_concat = table.concat -local replace_dashes_lower = require("kong.tools.string").replace_dashes_lower local var = ngx.var @@ -373,36 +372,21 @@ if is_http then end -- is_http +-- the fields returned from atc-router have fixed order and name +-- traversing these fields will always get a decided result (for one router instance) +-- so we need not to add field's name in cache key now local function visit_for_cache_key(field, value, str_buf) -- these fields were not in cache key if field == "net.protocol" then return true end - local headers_or_queries = field:sub(1, PREFIX_LEN) - - if headers_or_queries == HTTP_HEADERS_PREFIX then - headers_or_queries = true - field = replace_dashes_lower(field) - - elseif headers_or_queries == HTTP_QUERIES_PREFIX then - headers_or_queries = true - - else - headers_or_queries = false + if type(value) == "table" then + tb_sort(value) + value = tb_concat(value, ",") end - if not headers_or_queries then - str_buf:put(value or "", "|") - - else -- headers or queries - if type(value) == "table" then - tb_sort(value) - value = tb_concat(value, ",") - end - - str_buf:putf("%s=%s|", field, value or "") - end + str_buf:putf("%s|", value or "") return true end @@ -483,7 +467,11 @@ function _M:get_cache_key(params, ctx) visit_for_cache_key, str_buf) assert(res) - return str_buf:get() + local str = str_buf:get() + + -- returns a local variable instead of using a tail call + -- to avoid NYI + return str end From e077cae5b964bd130e449fab5a58d1b3d5af2811 Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Tue, 5 Mar 2024 10:26:52 +0200 Subject: [PATCH 85/91] chore(patches): fix pcre2 regex memory corruption issues (#12687) * chore(patches): fix pcre2 regex memory corruption issues ### Summary Adds couple of patches that were recently merged to upstream project. This should finally fix test issues with PCRE2 on EE master. 
* chore(patches): remove useless pcre config on stream module ### Summary Just a small patch from the upstream: https://github.com/openresty/stream-lua-nginx-module/commit/f1499e3b06f698dc2813e0686aa0cc257299fcd7 --- ...a-0.10.26_03-regex-memory-corruption.patch | 38 ++++++++++++ ...0.0.14_02-remove-useless-pcre-config.patch | 59 ++++++++++++++++++ ...ua-0.0.14_03-regex-memory-corruption.patch | 60 +++++++++++++++++++ 3 files changed, 157 insertions(+) create mode 100644 build/openresty/patches/ngx_lua-0.10.26_03-regex-memory-corruption.patch create mode 100644 build/openresty/patches/ngx_stream_lua-0.0.14_02-remove-useless-pcre-config.patch create mode 100644 build/openresty/patches/ngx_stream_lua-0.0.14_03-regex-memory-corruption.patch diff --git a/build/openresty/patches/ngx_lua-0.10.26_03-regex-memory-corruption.patch b/build/openresty/patches/ngx_lua-0.10.26_03-regex-memory-corruption.patch new file mode 100644 index 00000000000..1c40fd5fa57 --- /dev/null +++ b/build/openresty/patches/ngx_lua-0.10.26_03-regex-memory-corruption.patch @@ -0,0 +1,38 @@ +diff --git a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_regex.c b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_regex.c +index 1b52fa2..30c1650 100644 +--- a/bundle/ngx_lua-0.10.26/src/ngx_http_lua_regex.c ++++ b/bundle/ngx_lua-0.10.26/src/ngx_http_lua_regex.c +@@ -688,11 +688,11 @@ ngx_http_lua_ffi_exec_regex(ngx_http_lua_regex_t *re, int flags, + ngx_pool_t *old_pool; + + if (flags & NGX_LUA_RE_MODE_DFA) { +- ovecsize = 2; ++ ovecsize = 1; + re->ncaptures = 0; + + } else { +- ovecsize = (re->ncaptures + 1) * 3; ++ ovecsize = re->ncaptures + 1; + } + + old_pool = ngx_http_lua_pcre_malloc_init(NULL); +@@ -710,7 +710,7 @@ ngx_http_lua_ffi_exec_regex(ngx_http_lua_regex_t *re, int flags, + } + + ngx_regex_match_data_size = ovecsize; +- ngx_regex_match_data = pcre2_match_data_create(ovecsize / 3, NULL); ++ ngx_regex_match_data = pcre2_match_data_create(ovecsize, NULL); + + if (ngx_regex_match_data == NULL) { + rc = PCRE2_ERROR_NOMEMORY; +@@ -756,8 +756,8 @@ ngx_http_lua_ffi_exec_regex(ngx_http_lua_regex_t *re, int flags, + "n %ui, ovecsize %ui", flags, exec_opts, rc, n, ovecsize); + #endif + +- if (!(flags & NGX_LUA_RE_MODE_DFA) && n > ovecsize / 3) { +- n = ovecsize / 3; ++ if (n > ovecsize) { ++ n = ovecsize; + } + + for (i = 0; i < n; i++) { diff --git a/build/openresty/patches/ngx_stream_lua-0.0.14_02-remove-useless-pcre-config.patch b/build/openresty/patches/ngx_stream_lua-0.0.14_02-remove-useless-pcre-config.patch new file mode 100644 index 00000000000..1e706fc6c3e --- /dev/null +++ b/build/openresty/patches/ngx_stream_lua-0.0.14_02-remove-useless-pcre-config.patch @@ -0,0 +1,59 @@ +From f1499e3b06f698dc2813e0686aa0cc257299fcd7 Mon Sep 17 00:00:00 2001 +From: swananan +Date: Thu, 11 Jan 2024 08:46:17 +0800 +Subject: [PATCH] changes: remove the useless pcre config. 
+ +--- + config | 39 --------------------------------------- + 1 file changed, 39 deletions(-) + +diff --git a/bundle/ngx_stream_lua-0.0.14/config b/bundle/ngx_stream_lua-0.0.14/config +index 8db90628..e1470b7a 100644 +--- a/bundle/ngx_stream_lua-0.0.14/config ++++ b/bundle/ngx_stream_lua-0.0.14/config +@@ -405,45 +405,6 @@ fi + + # ---------------------------------------- + +-if [ $USE_PCRE = YES -o $PCRE != NONE ] && [ $PCRE != NO -a $PCRE != YES ] && [ $PCRE2 != YES ]; then +- # force pcre_version symbol to be required when PCRE is statically linked +- case "$NGX_PLATFORM" in +- Darwin:*) +- ngx_feature="require defined symbols (-u)" +- ngx_feature_name= +- ngx_feature_path= +- ngx_feature_libs="-Wl,-u,_strerror" +- ngx_feature_run=no +- ngx_feature_incs="#include " +- ngx_feature_test='printf("hello");' +- +- . auto/feature +- +- if [ $ngx_found = yes ]; then +- CORE_LIBS="-Wl,-u,_pcre_version $CORE_LIBS" +- fi +- ;; +- +- *) +- ngx_feature="require defined symbols (--require-defined)" +- ngx_feature_name= +- ngx_feature_path= +- ngx_feature_libs="-Wl,--require-defined=strerror" +- ngx_feature_run=no +- ngx_feature_incs="#include " +- ngx_feature_test='printf("hello");' +- +- . auto/feature +- +- if [ $ngx_found = yes ]; then +- CORE_LIBS="-Wl,--require-defined=pcre_version $CORE_LIBS" +- fi +- ;; +- esac +-fi +- +-# ---------------------------------------- +- + USE_MD5=YES + USE_SHA1=YES + diff --git a/build/openresty/patches/ngx_stream_lua-0.0.14_03-regex-memory-corruption.patch b/build/openresty/patches/ngx_stream_lua-0.0.14_03-regex-memory-corruption.patch new file mode 100644 index 00000000000..197a0e054b8 --- /dev/null +++ b/build/openresty/patches/ngx_stream_lua-0.0.14_03-regex-memory-corruption.patch @@ -0,0 +1,60 @@ +diff --git a/bundle/ngx_stream_lua-0.0.14/src/ngx_stream_lua_regex.c b/bundle/ngx_stream_lua-0.0.14/src/ngx_stream_lua_regex.c +index e32744e..241ec00 100644 +--- a/bundle/ngx_stream_lua-0.0.14/src/ngx_stream_lua_regex.c ++++ b/bundle/ngx_stream_lua-0.0.14/src/ngx_stream_lua_regex.c +@@ -695,11 +695,11 @@ ngx_stream_lua_ffi_exec_regex(ngx_stream_lua_regex_t *re, int flags, + ngx_pool_t *old_pool; + + if (flags & NGX_LUA_RE_MODE_DFA) { +- ovecsize = 2; ++ ovecsize = 1; + re->ncaptures = 0; + + } else { +- ovecsize = (re->ncaptures + 1) * 3; ++ ovecsize = re->ncaptures + 1; + } + + old_pool = ngx_stream_lua_pcre_malloc_init(NULL); +@@ -717,7 +717,7 @@ ngx_stream_lua_ffi_exec_regex(ngx_stream_lua_regex_t *re, int flags, + } + + ngx_regex_match_data_size = ovecsize; +- ngx_regex_match_data = pcre2_match_data_create(ovecsize / 3, NULL); ++ ngx_regex_match_data = pcre2_match_data_create(ovecsize, NULL); + + if (ngx_regex_match_data == NULL) { + rc = PCRE2_ERROR_NOMEMORY; +@@ -762,8 +762,8 @@ ngx_stream_lua_ffi_exec_regex(ngx_stream_lua_regex_t *re, int flags, + "n %ui, ovecsize %ui", flags, exec_opts, rc, n, ovecsize); + #endif + +- if (!(flags & NGX_LUA_RE_MODE_DFA) && n > ovecsize / 3) { +- n = ovecsize / 3; ++ if (n > ovecsize) { ++ n = ovecsize; + } + + for (i = 0; i < n; i++) { +@@ -796,6 +796,21 @@ ngx_stream_lua_ffi_exec_regex(ngx_stream_lua_regex_t *re, int flags, + re->ncaptures = 0; + + } else { ++ /* How pcre_exec() returns captured substrings ++ * The first two-thirds of the vector is used to pass back captured ++ * substrings, each substring using a pair of integers. The remaining ++ * third of the vector is used as workspace by pcre_exec() while ++ * matching capturing subpatterns, and is not available for passing ++ * back information. 
The number passed in ovecsize should always be a ++ * multiple of three. If it is not, it is rounded down. ++ * ++ * When a match is successful, information about captured substrings is ++ * returned in pairs of integers, starting at the beginning of ovector, ++ * and continuing up to two-thirds of its length at the most. The first ++ * element of each pair is set to the byte offset of the first character ++ * in a substring, and the second is set to the byte offset of the first ++ * character after the end of a substring. ++ */ + ovecsize = (re->ncaptures + 1) * 3; + } + From 793e632f38663332701acc7abbc44f6c95f245ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Mar 2024 11:14:36 +0200 Subject: [PATCH 86/91] chore(deps): bump actions/cache from 3 to 4 (#12387) Bumps [actions/cache](https://github.com/actions/cache) from 3 to 4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/autodocs.yml | 4 ++-- .github/workflows/build.yml | 2 +- .github/workflows/build_and_test.yml | 6 +++--- .github/workflows/perf.yml | 4 ++-- .github/workflows/release.yml | 4 ++-- .github/workflows/upgrade-tests.yml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/autodocs.yml b/.github/workflows/autodocs.yml index 12dcea67243..baf03c474da 100644 --- a/.github/workflows/autodocs.yml +++ b/.github/workflows/autodocs.yml @@ -35,7 +35,7 @@ jobs: uses: actions/checkout@v4 - name: Lookup build cache - uses: actions/cache@v3 + uses: actions/cache@v4 id: cache-deps with: path: ${{ env.INSTALL_ROOT }} @@ -94,7 +94,7 @@ jobs: ref: ${{ github.event.inputs.target_branch }} - name: Lookup build cache - uses: actions/cache@v3 + uses: actions/cache@v4 id: cache-deps with: path: ${{ env.INSTALL_ROOT }} diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 88704ccdedc..b815a183274 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -37,7 +37,7 @@ jobs: - name: Lookup build cache id: cache-deps - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ env.BUILD_ROOT }} key: ${{ steps.cache-key.outputs.cache-key }} diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 8cb47a16550..89614e85698 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -59,7 +59,7 @@ jobs: - name: Lookup build cache id: cache-deps - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ env.BUILD_ROOT }} key: ${{ needs.build.outputs.cache-key }} @@ -187,7 +187,7 @@ jobs: - name: Lookup build cache id: cache-deps - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ env.BUILD_ROOT }} key: ${{ needs.build.outputs.cache-key }} @@ -345,7 +345,7 @@ jobs: - name: Lookup build cache id: cache-deps - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ env.BUILD_ROOT }} key: ${{ needs.build.outputs.cache-key }} diff --git a/.github/workflows/perf.yml b/.github/workflows/perf.yml index d71b8851903..7bc69ee2bfe 100644 --- a/.github/workflows/perf.yml +++ b/.github/workflows/perf.yml 
@@ -42,7 +42,7 @@ jobs: - name: Lookup build cache id: cache-deps - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ env.BUILD_ROOT }} key: ${{ steps.cache-key.outputs.cache-key }} @@ -118,7 +118,7 @@ jobs: - name: Load Cached Packages id: cache-deps if: env.GHA_CACHE == 'true' - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ env.BUILD_ROOT }} key: ${{ needs.build-packages.outputs.cache-key }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4a40ff4d3ae..bf074de740a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -132,7 +132,7 @@ jobs: - name: Cache Git id: cache-git if: (matrix.package == 'rpm' || matrix.image == 'debian:10') && matrix.image != '' - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: /usr/local/git key: ${{ matrix.label }}-git-2.41.0 @@ -193,7 +193,7 @@ jobs: - name: Cache Packages id: cache-deps if: env.GHA_CACHE == 'true' - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: bazel-bin/pkg key: ${{ steps.cache-key.outputs.cache-key }} diff --git a/.github/workflows/upgrade-tests.yml b/.github/workflows/upgrade-tests.yml index 96effbccc5f..d3c75d916a6 100644 --- a/.github/workflows/upgrade-tests.yml +++ b/.github/workflows/upgrade-tests.yml @@ -47,7 +47,7 @@ jobs: - name: Lookup build cache id: cache-deps - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ env.BUILD_ROOT }} key: ${{ needs.build.outputs.cache-key }} From 336849d5588788691c8190c162377ca5065a73c7 Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Tue, 5 Mar 2024 11:46:29 +0200 Subject: [PATCH 87/91] refactor(tracing): simplified the dynamic hooks code (#12461) Signed-off-by: Aapo Talvensaari --- kong-3.7.0-0.rockspec | 1 - kong/dynamic_hook/init.lua | 222 +++++++++++++++++------ kong/dynamic_hook/wrap_function_gen.lua | 224 ------------------------ kong/init.lua | 70 ++++---- kong/resty/dns/client.lua | 2 +- kong/runloop/handler.lua | 6 +- kong/timing/init.lua | 17 +- 7 files changed, 223 insertions(+), 319 deletions(-) delete mode 100644 kong/dynamic_hook/wrap_function_gen.lua diff --git a/kong-3.7.0-0.rockspec b/kong-3.7.0-0.rockspec index 0d37a900f09..e5e0e42a6cb 100644 --- a/kong-3.7.0-0.rockspec +++ b/kong-3.7.0-0.rockspec @@ -610,6 +610,5 @@ build = { ["kong.timing.hooks.socket"] = "kong/timing/hooks/socket.lua", ["kong.dynamic_hook"] = "kong/dynamic_hook/init.lua", - ["kong.dynamic_hook.wrap_function_gen"] = "kong/dynamic_hook/wrap_function_gen.lua", } } diff --git a/kong/dynamic_hook/init.lua b/kong/dynamic_hook/init.lua index f86f09311b3..d5cd940b0f1 100644 --- a/kong/dynamic_hook/init.lua +++ b/kong/dynamic_hook/init.lua @@ -1,19 +1,25 @@ -local warp_function_gen = require("kong.dynamic_hook.wrap_function_gen") - -local ngx = ngx - -local _M = { +local ngx = ngx +local type = type +local pcall = pcall +local select = select +local ipairs = ipairs +local assert = assert +local ngx_log = ngx.log +local ngx_WARN = ngx.WARN +local ngx_get_phase = ngx.get_phase + + +local _M = { TYPE = { BEFORE = 1, - AFTER = 2, + AFTER = 2, BEFORE_MUT = 3, - AFTER_MUT = 4, + AFTER_MUT = 4, }, } -local pcall = pcall -local non_function_hooks = { +local NON_FUNCTION_HOOKS = { --[[ [group_name] = { [hook_name] = , @@ -23,39 +29,152 @@ local non_function_hooks = { --]] } -local always_enabled_groups = {} - -local wrap_functions = { - [0] = warp_function_gen.generate_wrap_function(0), - [1] = warp_function_gen.generate_wrap_function(1), - [2] = warp_function_gen.generate_wrap_function(2), - [3] 
= warp_function_gen.generate_wrap_function(3), - [4] = warp_function_gen.generate_wrap_function(4), - [5] = warp_function_gen.generate_wrap_function(5), - [6] = warp_function_gen.generate_wrap_function(6), - [7] = warp_function_gen.generate_wrap_function(7), - [8] = warp_function_gen.generate_wrap_function(8), - ["varargs"] = warp_function_gen.generate_wrap_function("varargs"), -} + +local ALWAYS_ENABLED_GROUPS = {} + + +local function should_execute_original_func(group_name) + if ALWAYS_ENABLED_GROUPS[group_name] then + return + end + + local phase = ngx_get_phase() + if phase == "init" or phase == "init_worker" then + return true + end + + local dynamic_hook = ngx.ctx.dynamic_hook + if not dynamic_hook then + return true + end + + local enabled_groups = dynamic_hook.enabled_groups + if not enabled_groups[group_name] then + return true + end +end + + +local function execute_hook_vararg(hook, hook_type, group_name, ...) + if not hook then + return + end + local ok, err = pcall(hook, ...) + if not ok then + ngx_log(ngx_WARN, "failed to run ", hook_type, " hook of ", group_name, ": ", err) + end +end + + +local function execute_hooks_vararg(hooks, hook_type, group_name, ...) + if not hooks then + return + end + for _, hook in ipairs(hooks) do + execute_hook_vararg(hook, hook_type, group_name, ...) + end +end + + +local function execute_after_hooks_vararg(handlers, group_name, ...) + execute_hook_vararg(handlers.after_mut, "after_mut", group_name, ...) + execute_hooks_vararg(handlers.afters, "after", group_name, ...) + return ... +end + + +local function wrap_function_vararg(group_name, original_func, handlers) + return function (...) + if should_execute_original_func(group_name) then + return original_func(...) + end + execute_hooks_vararg(handlers.befores, "before", group_name, ...) 
+ return execute_after_hooks_vararg(handlers, group_name, original_func(...)) + end +end + + +local function execute_hook(hook, hook_type, group_name, a1, a2, a3, a4, a5, a6, a7, a8) + if not hook then + return + end + local ok, err = pcall(hook, a1, a2, a3, a4, a5, a6, a7, a8) + if not ok then + ngx_log(ngx_WARN, "failed to run ", hook_type, " hook of ", group_name, ": ", err) + end +end + + +local function execute_hooks(hooks, hook_type, group_name, a1, a2, a3, a4, a5, a6, a7, a8) + if not hooks then + return + end + for _, hook in ipairs(hooks) do + execute_hook(hook, hook_type, group_name, a1, a2, a3, a4, a5, a6, a7, a8) + end +end + + +local function execute_original_func(max_args, original_func, a1, a2, a3, a4, a5, a6, a7, a8) + if max_args == 0 then + return original_func() + elseif max_args == 1 then + return original_func(a1) + elseif max_args == 2 then + return original_func(a1, a2) + elseif max_args == 3 then + return original_func(a1, a2, a3) + elseif max_args == 4 then + return original_func(a1, a2, a3, a4) + elseif max_args == 5 then + return original_func(a1, a2, a3, a4, a5) + elseif max_args == 6 then + return original_func(a1, a2, a3, a4, a5, a6) + elseif max_args == 7 then + return original_func(a1, a2, a3, a4, a5, a6, a7) + else + return original_func(a1, a2, a3, a4, a5, a6, a7, a8) + end +end + + +local function wrap_function(max_args, group_name, original_func, handlers) + return function(a1, a2, a3, a4, a5, a6, a7, a8) + if should_execute_original_func(group_name) then + a1, a2, a3, a4, a5, a6, a7, a8 = execute_original_func(max_args, original_func, a1, a2, a3, a4, a5, a6, a7, a8) + + else + execute_hook(handlers.before_mut, "before_mut", group_name, a1, a2, a3, a4, a5, a6, a7, a8) + execute_hooks(handlers.befores, "before", group_name, a1, a2, a3, a4, a5, a6, a7, a8) + a1, a2, a3, a4, a5, a6, a7, a8 = execute_original_func(max_args, original_func, a1, a2, a3, a4, a5, a6, a7, a8) + execute_hook(handlers.after_mut, "after_mut", group_name, a1, a2, a3, a4, a5, a6, a7, a8) + execute_hooks(handlers.afters, "after", group_name, a1, a2, a3, a4, a5, a6, a7, a8) + end + return a1, a2, a3, a4, a5, a6, a7, a8 + end +end function _M.hook_function(group_name, parent, child_key, max_args, handlers) assert(type(parent) == "table", "parent must be a table") assert(type(child_key) == "string", "child_key must be a string") - if type(max_args) == "string" then - assert(max_args == "varargs", "max_args must be a number or \"varargs\"") + local is_varargs = max_args == "varargs" + if is_varargs then assert(handlers.before_mut == nil, "before_mut is not supported for varargs functions") - else - assert(type(max_args) == "number", "max_args must be a number or \"varargs\"") - assert(max_args >= 0 and max_args <= 8, "max_args must be >= 0") + assert(type(max_args) == "number", 'max_args must be a number or "varargs"') + assert(max_args >= 0 and max_args <= 8, 'max_args must be >= 0 and <= 8, or "varargs"') end - local old_func = parent[child_key] - assert(type(old_func) == "function", "parent[" .. child_key .. "] must be a function") + local original_func = parent[child_key] + assert(type(original_func) == "function", "parent[" .. child_key .. 
"] must be a function") - parent[child_key] = wrap_functions[max_args](always_enabled_groups, group_name, old_func, handlers) + if is_varargs then + parent[child_key] = wrap_function_vararg(group_name, original_func, handlers) + else + parent[child_key] = wrap_function(max_args, group_name, original_func, handlers) + end end @@ -64,10 +183,10 @@ function _M.hook(group_name, hook_name, handler) assert(type(hook_name) == "string", "hook_name must be a string") assert(type(handler) == "function", "handler must be a function") - local hooks = non_function_hooks[group_name] + local hooks = NON_FUNCTION_HOOKS[group_name] if not hooks then hooks = {} - non_function_hooks[group_name] = hooks + NON_FUNCTION_HOOKS[group_name] = hooks end hooks[hook_name] = handler @@ -75,7 +194,7 @@ end function _M.is_group_enabled(group_name) - if always_enabled_groups[group_name] then + if ALWAYS_ENABLED_GROUPS[group_name] then return true end @@ -93,12 +212,12 @@ function _M.is_group_enabled(group_name) end -function _M.run_hooks(ctx, group_name, hook_name, ...) +function _M.run_hooks(group_name, hook_name, a1, a2, a3, a4, a5, a6, a7, a8, ...) if not _M.is_group_enabled(group_name) then return end - local hooks = non_function_hooks[group_name] + local hooks = NON_FUNCTION_HOOKS[group_name] if not hooks then return end @@ -108,30 +227,35 @@ function _M.run_hooks(ctx, group_name, hook_name, ...) return end - local ok, err = pcall(handler, ...) + local argc = select("#", ...) + local ok, err + if argc == 0 then + ok, err = pcall(handler, a1, a2, a3, a4, a5, a6, a7, a8) + else + ok, err = pcall(handler, a1, a2, a3, a4, a5, a6, a7, a8, ...) + end if not ok then - ngx.log(ngx.WARN, - string.format("failed to run dynamic hook %s.%s: %s", - group_name, hook_name, err)) + ngx_log(ngx_WARN, "failed to run dynamic hook ", group_name, ".", hook_name, ": ", err) end end -function _M.enable_on_this_request(group_name) - local info = ngx.ctx.dynamic_hook - if not info then - info = { - enabled_groups = {}, +function _M.enable_on_this_request(group_name, ngx_ctx) + ngx_ctx = ngx_ctx or ngx.ctx + if ngx_ctx.dynamic_hook then + ngx_ctx.dynamic_hook.enabled_groups[group_name] = true + else + ngx_ctx.dynamic_hook = { + enabled_groups = { + [group_name] = true + }, } - ngx.ctx.dynamic_hook = info end - - info.enabled_groups[group_name] = true end function _M.always_enable(group_name) - always_enabled_groups[group_name] = true + ALWAYS_ENABLED_GROUPS[group_name] = true end diff --git a/kong/dynamic_hook/wrap_function_gen.lua b/kong/dynamic_hook/wrap_function_gen.lua deleted file mode 100644 index dddddb55635..00000000000 --- a/kong/dynamic_hook/wrap_function_gen.lua +++ /dev/null @@ -1,224 +0,0 @@ -local ngx_get_phase = ngx.get_phase - -local TEMPLATE = [[ - return function(always_enabled_groups, group_name, original_func, handlers) - -- we cannot access upvalue here as this function is generated - local ngx = ngx - local ngx_get_phase = ngx.get_phase - - return function(%s) - if not always_enabled_groups[group_name] then - local phase = ngx_get_phase() - if phase == "init" or phase == "init_worker" then - return original_func(%s) - end - local dynamic_hook = ngx.ctx.dynamic_hook - if not dynamic_hook then - return original_func(%s) - end - - local enabled_groups = dynamic_hook.enabled_groups - if not enabled_groups[group_name] then - return original_func(%s) - end - end - - if handlers.before_mut then - local ok - ok, %s = pcall(handlers.before_mut, %s) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run before_mut 
hook of %%s: %%s", - group_name, a0)) - end - end - - if handlers.befores then - for _, func in ipairs(handlers.befores) do - local ok, err = pcall(func, %s) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run before hook of %%s: %%s", - group_name, err)) - end - end - end - - local r0, r1, r2, r3, r4, r5, r6, r7 = original_func(%s) - - if handlers.after_mut then - local ok, err = pcall(handlers.after_mut, r0, r1, r2, r3, r4, r5, r6, r7) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run after_mut hook of %%s: %%s", - group_name, err)) - end - end - - if handlers.afters then - for _, func in ipairs(handlers.afters) do - local ok, err = pcall(func, r0, r1, r2, r3, r4, r5, r6, r7) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run after hook of %%s: %%s", - group_name, err)) - end - end - end - - return r0, r1, r2, r3, r4, r5, r6, r7 - end - end -]] - - -local _M = {} - - -local function warp_function_0(always_enabled_groups, group_name, original_func, handlers) - return function() - if not always_enabled_groups[group_name] then - local phase = ngx_get_phase() - if phase == "init" or phase == "init_worker" then - return original_func() - end - - local dynamic_hook = ngx.ctx.dynamic_hook - if not dynamic_hook then - return original_func() - end - - local enabled_groups = dynamic_hook.enabled_groups - if not enabled_groups[group_name] then - return original_func() - end - end - - if handlers.before_mut then - local ok, err = pcall(handlers.before_mut) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run before_mut hook of %s: %s", - group_name, err)) - end - end - - if handlers.befores then - for _, func in ipairs(handlers.befores) do - local ok, err = pcall(func) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run before hook of %s: %s", - group_name, err)) - end - end - end - - local r0, r1, r2, r3, r4, r5, r6, r7 = original_func() - - if handlers.after_mut then - local ok, err = pcall(handlers.after_mut, r0, r1, r2, r3, r4, r5, r6, r7) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run after_mut hook of %s: %s", - group_name, err)) - end - end - - if handlers.afters then - for _, func in ipairs(handlers.afters) do - local ok, err = pcall(func, r0, r1, r2, r3, r4, r5, r6, r7) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run after hook of %s: %s", - group_name, err)) - end - end - end - - return r0, r1, r2, r3, r4, r5, r6, r7 - end -end - - -local function wrap_function_varargs(always_enabled_groups, group_name, original_func, handlers) - return function(...) - if not always_enabled_groups[group_name] then - local phase = ngx_get_phase() - if phase == "init" or phase == "init_worker" then - return original_func(...) - end - - local dynamic_hook = ngx.ctx.dynamic_hook - if not dynamic_hook then - return original_func(...) - end - - local enabled_groups = dynamic_hook.enabled_groups - if not enabled_groups[group_name] then - return original_func(...) - end - end - - -- before_mut is not supported for varargs functions - - if handlers.befores then - for _, func in ipairs(handlers.befores) do - local ok, err = pcall(func, ...) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run before hook of %s: %s", - group_name, err)) - end - end - end - - local r0, r1, r2, r3, r4, r5, r6, r7 = original_func(...) 
- - if handlers.after_mut then - local ok, err = pcall(handlers.after_mut, r0, r1, r2, r3, r4, r5, r6, r7) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run after_mut hook of %s: %s", - group_name, err)) - end - end - - if handlers.afters then - for _, func in ipairs(handlers.afters) do - local ok, err = pcall(func, r0, r1, r2, r3, r4, r5, r6, r7) - if not ok then - ngx.log(ngx.WARN, - string.format("failed to run after hook of %s: %s", - group_name, err)) - end - end - end - - return r0, r1, r2, r3, r4, r5, r6, r7 - end -end - - -function _M.generate_wrap_function(max_args) - if max_args == 0 then - return warp_function_0 - end - - if max_args == "varargs" then - return wrap_function_varargs - end - - local args = "a0" -- the 1st arg must be named as "a0" as - -- it will be used in the error log - - for i = 1, max_args - 1 do - args = args .. ", a" .. i - end - - local func = assert(loadstring(string.format(TEMPLATE, args, args, args, args, args, args, args, args)))() - assert(type(func) == "function", "failed to generate wrap function: " .. tostring(func)) - return func -end - -return _M \ No newline at end of file diff --git a/kong/init.lua b/kong/init.lua index d37a08325a0..2c837dd0e52 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -322,7 +322,7 @@ local function execute_global_plugins_iterator(plugins_iterator, phase, ctx) local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:plugin_iterator") + req_dyn_hook_run_hooks("timing", "before:plugin_iterator") end for _, plugin, configuration in iterator, plugins, 0 do @@ -334,13 +334,13 @@ local function execute_global_plugins_iterator(plugins_iterator, phase, ctx) setup_plugin_context(ctx, plugin, configuration) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:plugin", plugin.name, ctx.plugin_id) + req_dyn_hook_run_hooks("timing", "before:plugin", plugin.name, ctx.plugin_id) end plugin.handler[phase](plugin.handler, configuration) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:plugin") + req_dyn_hook_run_hooks("timing", "after:plugin") end reset_plugin_context(ctx, old_ws) @@ -351,7 +351,7 @@ local function execute_global_plugins_iterator(plugins_iterator, phase, ctx) end if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:plugin_iterator") + req_dyn_hook_run_hooks("timing", "after:plugin_iterator") end end @@ -372,7 +372,7 @@ local function execute_collecting_plugins_iterator(plugins_iterator, phase, ctx) local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:plugin_iterator") + req_dyn_hook_run_hooks("timing", "before:plugin_iterator") end for _, plugin, configuration in iterator, plugins, 0 do @@ -385,14 +385,14 @@ local function execute_collecting_plugins_iterator(plugins_iterator, phase, ctx) setup_plugin_context(ctx, plugin, configuration) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:plugin", plugin.name, ctx.plugin_id) + req_dyn_hook_run_hooks( "timing", "before:plugin", plugin.name, ctx.plugin_id) end local co = coroutine.create(plugin.handler[phase]) local cok, cerr = coroutine.resume(co, plugin.handler, configuration) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:plugin") + req_dyn_hook_run_hooks("timing", "after:plugin") end if not cok then @@ -422,7 +422,7 @@ local function execute_collecting_plugins_iterator(plugins_iterator, phase, ctx) end if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:plugin_iterator") 
+ req_dyn_hook_run_hooks("timing", "after:plugin_iterator") end ctx.delay_response = nil @@ -443,7 +443,7 @@ local function execute_collected_plugins_iterator(plugins_iterator, phase, ctx) local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:plugin_iterator") + req_dyn_hook_run_hooks("timing", "before:plugin_iterator") end for _, plugin, configuration in iterator, plugins, 0 do @@ -455,13 +455,13 @@ local function execute_collected_plugins_iterator(plugins_iterator, phase, ctx) setup_plugin_context(ctx, plugin, configuration) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:plugin", plugin.name, ctx.plugin_id) + req_dyn_hook_run_hooks("timing", "before:plugin", plugin.name, ctx.plugin_id) end plugin.handler[phase](plugin.handler, configuration) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:plugin") + req_dyn_hook_run_hooks("timing", "after:plugin") end reset_plugin_context(ctx, old_ws) @@ -472,7 +472,7 @@ local function execute_collected_plugins_iterator(plugins_iterator, phase, ctx) end if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:plugin_iterator") + req_dyn_hook_run_hooks("timing", "after:plugin_iterator") end end @@ -1087,7 +1087,7 @@ function Kong.rewrite() ctx.KONG_PHASE = PHASES.rewrite local has_timing - req_dyn_hook_run_hooks(ctx, "timing:auth", "auth") + req_dyn_hook_run_hooks("timing:auth", "auth") if req_dyn_hook_is_group_enabled("timing") then ctx.has_timing = true @@ -1095,7 +1095,7 @@ function Kong.rewrite() end if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:rewrite") + req_dyn_hook_run_hooks("timing", "before:rewrite") end kong_resty_ctx.stash_ref(ctx) @@ -1124,7 +1124,7 @@ function Kong.rewrite() ctx.KONG_REWRITE_TIME = ctx.KONG_REWRITE_ENDED_AT - ctx.KONG_REWRITE_START if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:rewrite") + req_dyn_hook_run_hooks("timing", "after:rewrite") end end @@ -1134,7 +1134,7 @@ function Kong.access() local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:access") + req_dyn_hook_run_hooks("timing", "before:access") end if not ctx.KONG_ACCESS_START then @@ -1160,7 +1160,7 @@ function Kong.access() ctx.KONG_RESPONSE_LATENCY = ctx.KONG_ACCESS_ENDED_AT - ctx.KONG_PROCESSING_START if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:access") + req_dyn_hook_run_hooks("timing", "after:access") end return flush_delayed_response(ctx) @@ -1176,7 +1176,7 @@ function Kong.access() ctx.buffered_proxying = nil if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:access") + req_dyn_hook_run_hooks("timing", "after:access") end return kong.response.error(503, "no Service found with those values") @@ -1197,7 +1197,7 @@ function Kong.access() local upgrade = var.upstream_upgrade or "" if version < 2 and upgrade == "" then if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:access") + req_dyn_hook_run_hooks("timing", "after:access") end return Kong.response() @@ -1213,7 +1213,7 @@ function Kong.access() end if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:access") + req_dyn_hook_run_hooks("timing", "after:access") end end @@ -1223,7 +1223,7 @@ function Kong.balancer() local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:balancer") + req_dyn_hook_run_hooks("timing", "before:balancer") end -- This may be called multiple times, and no yielding here! 
@@ -1305,7 +1305,7 @@ function Kong.balancer() ctx.KONG_PROXY_LATENCY = ctx.KONG_BALANCER_ENDED_AT - ctx.KONG_PROCESSING_START if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:balancer") + req_dyn_hook_run_hooks("timing", "after:balancer") end return ngx.exit(errcode) @@ -1317,7 +1317,7 @@ function Kong.balancer() ngx_log(ngx_ERR, "failed to set balancer Host header: ", err) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:balancer") + req_dyn_hook_run_hooks("timing", "after:balancer") end return ngx.exit(500) @@ -1372,7 +1372,7 @@ function Kong.balancer() ctx.KONG_PROXY_LATENCY = ctx.KONG_BALANCER_ENDED_AT - ctx.KONG_PROCESSING_START if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:balancer") + req_dyn_hook_run_hooks("timing", "after:balancer") end return ngx.exit(500) @@ -1412,7 +1412,7 @@ function Kong.balancer() ctx.KONG_PROXY_LATENCY = ctx.KONG_BALANCER_ENDED_AT - ctx.KONG_PROCESSING_START if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:balancer") + req_dyn_hook_run_hooks("timing", "after:balancer") end end @@ -1441,7 +1441,7 @@ do local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:response") + req_dyn_hook_run_hooks("timing", "before:response") end local plugins_iterator = runloop.get_plugins_iterator() @@ -1462,7 +1462,7 @@ do ngx.status = res.status or 502 if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:response") + req_dyn_hook_run_hooks("timing", "after:response") end return kong_error_handlers(ctx) @@ -1516,7 +1516,7 @@ do ngx.print(body) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:response") + req_dyn_hook_run_hooks("timing", "after:response") end -- jump over the balancer to header_filter @@ -1530,7 +1530,7 @@ function Kong.header_filter() local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:header_filter") + req_dyn_hook_run_hooks("timing", "before:header_filter") end if not ctx.KONG_PROCESSING_START then @@ -1602,7 +1602,7 @@ function Kong.header_filter() ctx.KONG_HEADER_FILTER_TIME = ctx.KONG_HEADER_FILTER_ENDED_AT - ctx.KONG_HEADER_FILTER_START if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:header_filter") + req_dyn_hook_run_hooks("timing", "after:header_filter") end end @@ -1612,7 +1612,7 @@ function Kong.body_filter() local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:body_filter") + req_dyn_hook_run_hooks("timing", "before:body_filter") end if not ctx.KONG_BODY_FILTER_START then @@ -1671,7 +1671,7 @@ function Kong.body_filter() if not arg[2] then if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:body_filter") + req_dyn_hook_run_hooks("timing", "after:body_filter") end return @@ -1693,7 +1693,7 @@ function Kong.body_filter() end if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:body_filter") + req_dyn_hook_run_hooks("timing", "after:body_filter") end end @@ -1703,7 +1703,7 @@ function Kong.log() local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:log") + req_dyn_hook_run_hooks("timing", "before:log") end if not ctx.KONG_LOG_START then @@ -1798,7 +1798,7 @@ function Kong.log() runloop.log.after(ctx) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:log") + req_dyn_hook_run_hooks("timing", "after:log") end release_table(CTX_NS, ctx) diff --git a/kong/resty/dns/client.lua b/kong/resty/dns/client.lua index 
fcc92a4217d..78cf91d29b5 100644 --- a/kong/resty/dns/client.lua +++ b/kong/resty/dns/client.lua @@ -143,7 +143,7 @@ local cachelookup = function(qname, qtype) local ctx = ngx.ctx if ctx and ctx.has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "dns:cache_lookup", cached ~= nil) + req_dyn_hook_run_hooks("timing", "dns:cache_lookup", cached ~= nil) end if cached then diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index e6cf91469f9..1a5f3a00a00 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -1112,7 +1112,7 @@ return { local has_timing = ctx.has_timing if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "before:router") + req_dyn_hook_run_hooks("timing", "before:router") end -- routing request @@ -1120,7 +1120,7 @@ return { local match_t = router:exec(ctx) if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "after:router") + req_dyn_hook_run_hooks("timing", "after:router") end if not match_t then @@ -1141,7 +1141,7 @@ return { ctx.workspace = match_t.route and match_t.route.ws_id if has_timing then - req_dyn_hook_run_hooks(ctx, "timing", "workspace_id:got", ctx.workspace) + req_dyn_hook_run_hooks("timing", "workspace_id:got", ctx.workspace) end local host = var.host diff --git a/kong/timing/init.lua b/kong/timing/init.lua index 9b9c5df3199..7f64d2e28bf 100644 --- a/kong/timing/init.lua +++ b/kong/timing/init.lua @@ -7,6 +7,8 @@ local ngx = ngx local ngx_var = ngx.var local ngx_req_set_header = ngx.req.set_header +local assert = assert +local ipairs = ipairs local string_format = string.format local request_id_get = require("kong.tracing.request_id").get @@ -62,7 +64,9 @@ function _M.auth() return end - assert(ngx.ctx.req_trace_id == nil) + local ngx_ctx = ngx.ctx + + assert(ngx_ctx.req_trace_id == nil) local http_x_kong_request_debug = ngx_var.http_x_kong_request_debug local http_x_kong_request_debug_token = ngx_var.http_x_kong_request_debug_token @@ -100,8 +104,8 @@ function _M.auth() loopback = loopback, }) ctx:set_context_prop("request_id", request_id_get()) - ngx.ctx.req_trace_ctx = ctx - req_dyn_hook.enable_on_this_request("timing") + ngx_ctx.req_trace_ctx = ctx + req_dyn_hook.enable_on_this_request("timing", ngx_ctx) end @@ -147,7 +151,8 @@ end function _M.header_filter() - local req_tr_ctx = ngx.ctx.req_trace_ctx + local ngx_ctx = ngx.ctx + local req_tr_ctx = ngx_ctx.req_trace_ctx req_tr_ctx:mock_upstream_phase() local output = req_tr_ctx:to_json() @@ -155,11 +160,11 @@ function _M.header_filter() if #output >= HEADER_JSON_TRUNCATE_LENGTH and not req_tr_ctx:from_loopback() then output = assert(cjson.encode({ truncated = true, - request_id = ngx.ctx.req_trace_ctx:get_root_context_kv("request_id"), + request_id = ngx_ctx.req_trace_ctx:get_root_context_kv("request_id"), message = "Output is truncated, please check the error_log for full output by filtering with the request_id.", })) - ngx.ctx.req_trace_ctx.log = true + ngx_ctx.req_trace_ctx.log = true end ngx.header["X-Kong-Request-Debug-Output"] = output From b5c4b05aaec1c333b2361f83bc1d6d4cdba3ac0c Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Wed, 28 Feb 2024 16:37:21 +0200 Subject: [PATCH 88/91] chore(deps): bump luarocks from 3.9.2 to 3.10.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Summary What's new in LuaRocks 3.10.0: * Features: * Introduce file-based locking for concurrent access control. Previously, LuaRocks would produce undefined behavior when running two instances at the same time. 
* Rockspec quality-of-life improvements: * Using an unknown `build.type` now automatically implies a build dependency for `luarocks-build-`. * Improve `rockspec.source.dir` autodetection. * `builtin` build mode now automatically inherits include and libdirs from `external_dependencies` if not set explicitly. * improved and simplified Lua interpreter search. * `lua_interpreter` config value is deprecated in favor of `variables.LUA` which contains the full interpreter path. * `luarocks-admin remove` now supports the `file://` protocol for managing local rocks servers. * Bundled dkjson library, so that `luarocks upload` does not require an external JSON library. * New flags for `luarocks init`: `--no-gitignore`, `--no-wrapper-scripts`, `--wrapper-dir`. * `luarocks config` now attempts updating the system config by default when `local_by_default` is `false`. * New flag for `luarocks path`: `--full`, for use with `--lr-path` and `--lr-cpath`. * Fixes: * various Windows-specific fixes: * `build.install_command` now works correctly on Windows. * do not attempt to set "executable" permissions for folders on Windows. * better handling of Windows backslash paths. * fix program search when using absolute paths and `.exe` files. * improved lookup order for library dependencies. * `LUALIB` filename detection is now done dynamically at runtime and not hardcoded by the Windows installer. * prevent LuaRocks from blocking `luafilesystem` from being removed on Windows. * `luarocks build` no longer looks for Lua headers when installing pure-Lua rocks. * `luarocks build` table in rockspecs now gets some additional validation to prevent crashes on malformed rockspecs. * `build.builtin` now compiles C modules in a temporary directory, avoiding name clashes * `build_dependencies` now correctly installs dependencies for the Lua version that LuaRocks is running on, and not the one it is building for with `--lua-version`. * `build_dependencies` can now use a dependency available in any rocks tree (system, user, project). * `luarocks config` now prints boolean values correctly on Lua 5.1. * `luarocks config` now ensures the target directory exists when saving a configuration. * `luarocks init` now injects the project's `package.(c)path` in the Lua wrapper. * `luarocks lint` no longer crashes if a rockspec misses a `description` field. * `luarocks test` now handles malformed `command` entries gracefully. * if `--lua-*` flags are given in the CLI, the hardcoded values are never used. * the "no downloader" error is now shown only once, and not once per failed mirror. * project dir is always presented normalized * catch the failure to setup `LUA_BINDIR` early. * when using `--pack-binary-rock` and a `zip` program is unavailable, report that instead of failing cryptically. * More graceful handling when failing to create a local cache. * Avoid confusion with macOS multiarch binaries on system detection. * Add `--tree` to the rocks trees list. * Better support for LuaJIT versions with extra suffixes in their version numbers. * Don't use floats to parse Lua version number. * Various fixes related to path normalization. LuaRocks 3.10.0 contains new commits by Roman Orekhov, Michael Savage, Pavel Balaev, Aleksei Volkov, Pierre Chapuis, Sebastian Hübner, and Hisham Muhammad. You can find detailed credits in the Git history. You can find all links for installation at https://luarocks.org — source packages for all supported platforms and binaries for Windows (32 and 64 bit) as well as Linux x86-64 are available. 
Special thanks go to Kong and itch.io for their continued commitment to open source, sponsoring the maintenance of the LuaRocks in various ways. Signed-off-by: Aapo Talvensaari --- .requirements | 2 +- build/luarocks/luarocks_repositories.bzl | 2 +- build/luarocks/luarocks_wrap_script.lua | 2 +- build/templates/venv-commons | 2 +- changelog/unreleased/kong/bump-luarocks.yml | 2 ++ 5 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog/unreleased/kong/bump-luarocks.yml diff --git a/.requirements b/.requirements index 286634dc112..d002fb23a4b 100644 --- a/.requirements +++ b/.requirements @@ -1,7 +1,7 @@ KONG_PACKAGE_NAME=kong OPENRESTY=1.25.3.1 -LUAROCKS=3.9.2 +LUAROCKS=3.10.0 OPENSSL=3.2.1 PCRE=10.43 LIBEXPAT=2.5.0 diff --git a/build/luarocks/luarocks_repositories.bzl b/build/luarocks/luarocks_repositories.bzl index 588595faf3d..7741e138b45 100644 --- a/build/luarocks/luarocks_repositories.bzl +++ b/build/luarocks/luarocks_repositories.bzl @@ -10,7 +10,7 @@ def luarocks_repositories(): name = "luarocks", build_file = "//build/luarocks:BUILD.luarocks.bazel", strip_prefix = "luarocks-" + version, - sha256 = "bca6e4ecc02c203e070acdb5f586045d45c078896f6236eb46aa33ccd9b94edb", + sha256 = "e9bf06d5ec6b8ecc6dbd1530d2d77bdb3377d814a197c46388e9f148548c1c89", urls = [ "https://luarocks.org/releases/luarocks-" + version + ".tar.gz", ], diff --git a/build/luarocks/luarocks_wrap_script.lua b/build/luarocks/luarocks_wrap_script.lua index 44e03cbaceb..18999e11a22 100644 --- a/build/luarocks/luarocks_wrap_script.lua +++ b/build/luarocks/luarocks_wrap_script.lua @@ -20,8 +20,8 @@ if install_dest:sub(-1) ~= "/" then install_dest = install_dest .. "/" end -- HACK -cfg.lua_interpreter = "luajit" cfg.sysconfdir = install_dest .. "etc/luarocks" +cfg.variables["LUA"] = install_dest .. "openresty/luajit/bin/luajit" cfg.variables["LUA_DIR"] = install_dest .. "openresty/luajit" cfg.variables["LUA_INCDIR"] = install_dest .. "openresty/luajit/include/luajit-2.1" cfg.variables["LUA_BINDIR"] = install_dest .. "openresty/luajit/bin" diff --git a/build/templates/venv-commons b/build/templates/venv-commons index f13613ca71d..f16a5aadbde 100644 --- a/build/templates/venv-commons +++ b/build/templates/venv-commons @@ -42,7 +42,7 @@ $KONG_VENV/openresty/site/lualib/?.lua;$KONG_VENV/openresty/site/lualib/?.ljbc;\ $KONG_VENV/openresty/site/lualib/?/init.lua;$KONG_VENV/openresty/site/lualib/?/init.ljbc;\ $KONG_VENV/openresty/lualib/?.lua;$KONG_VENV/openresty/lualib/?.ljbc;\ $KONG_VENV/openresty/lualib/?/init.lua;$KONG_VENV/openresty/lualib/?/init.ljbc;\ -$KONG_VENV/openresty/luajit/share/luajit-2.1.0-beta3/?.lua" +$KONG_VENV/openresty/luajit/share/luajit-2.1/?.lua" # support custom plugin development if [ -n $KONG_PLUGIN_PATH ] ; then diff --git a/changelog/unreleased/kong/bump-luarocks.yml b/changelog/unreleased/kong/bump-luarocks.yml new file mode 100644 index 00000000000..843bfaf358d --- /dev/null +++ b/changelog/unreleased/kong/bump-luarocks.yml @@ -0,0 +1,2 @@ +message: "Bumped LuaRocks from 3.9.2 to 3.10.0" +type: dependency From 654b334ea99b9924330bf4e8cfc62b98db00be38 Mon Sep 17 00:00:00 2001 From: Wangchong Zhou Date: Wed, 31 Jan 2024 21:39:33 +0800 Subject: [PATCH 89/91] Revert "hotfix(cd): skip comment on commit step (#12090)" This reverts commit cc6f139f5428c7e47786f7be283d53a4c6394b8a. 
--- .github/workflows/release.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bf074de740a..bc07e202999 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -401,7 +401,6 @@ jobs: - name: Comment on commit if: github.event_name == 'push' && matrix.label == 'ubuntu' uses: peter-evans/commit-comment@5a6f8285b8f2e8376e41fe1b563db48e6cf78c09 # v3.0.0 - continue-on-error: true # TODO: temporary fix until the token is back with: token: ${{ secrets.GHA_COMMENT_TOKEN }} body: | From 9566dd94efb80fc3f8d7a7c875603167f528e2e5 Mon Sep 17 00:00:00 2001 From: samugi Date: Fri, 23 Feb 2024 18:46:05 +0100 Subject: [PATCH 90/91] fix(ci): build and test use github token replace PAT with github_token --- .github/workflows/build_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 89614e85698..7b8170b387e 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -108,7 +108,7 @@ jobs: - name: Download runtimes file uses: Kong/gh-storage/download@v1 env: - GITHUB_TOKEN: ${{ secrets.PAT }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: repo-path: Kong/gateway-action-storage/main/.ci/runtimes.json From 7c06064e82a6ac73393aa81ffbb62204dbfa75dd Mon Sep 17 00:00:00 2001 From: Chrono Date: Wed, 6 Mar 2024 10:17:08 +0800 Subject: [PATCH 91/91] refactor(router/atc): separate expression transformation logic (#12650) --- kong-3.7.0-0.rockspec | 1 + kong/router/atc.lua | 128 +------ kong/router/compat.lua | 462 +---------------------- kong/router/expressions.lua | 11 +- kong/router/transform.lua | 638 ++++++++++++++++++++++++++++++++ kong/router/utils.lua | 35 -- spec/01-unit/08-router_spec.lua | 1 + 7 files changed, 655 insertions(+), 621 deletions(-) create mode 100644 kong/router/transform.lua diff --git a/kong-3.7.0-0.rockspec b/kong-3.7.0-0.rockspec index e5e0e42a6cb..3d1bdc2839d 100644 --- a/kong-3.7.0-0.rockspec +++ b/kong-3.7.0-0.rockspec @@ -65,6 +65,7 @@ build = { ["kong.router.expressions"] = "kong/router/expressions.lua", ["kong.router.atc"] = "kong/router/atc.lua", ["kong.router.fields"] = "kong/router/fields.lua", + ["kong.router.transform"] = "kong/router/transform.lua", ["kong.router.utils"] = "kong/router/utils.lua", ["kong.conf_loader"] = "kong/conf_loader/init.lua", diff --git a/kong/router/atc.lua b/kong/router/atc.lua index b186a1b29bb..3fed56771c4 100644 --- a/kong/router/atc.lua +++ b/kong/router/atc.lua @@ -2,10 +2,10 @@ local _M = {} local _MT = { __index = _M, } -local buffer = require("string.buffer") local lrucache = require("resty.lrucache") local tb_new = require("table.new") local utils = require("kong.router.utils") +local transform = require("kong.router.transform") local rat = require("kong.tools.request_aware_table") local yield = require("kong.tools.yield").yield @@ -15,9 +15,6 @@ local assert = assert local setmetatable = setmetatable local pairs = pairs local ipairs = ipairs -local tonumber = tonumber - - local max = math.max @@ -32,22 +29,15 @@ local ngx_ERR = ngx.ERR local check_select_params = utils.check_select_params local get_service_info = utils.get_service_info local route_match_stat = utils.route_match_stat +local split_host_port = transform.split_host_port local DEFAULT_MATCH_LRUCACHE_SIZE = utils.DEFAULT_MATCH_LRUCACHE_SIZE -local LOGICAL_OR = " || " -local LOGICAL_AND = " && " - - local is_http = ngx.config.subsystem == "http" 
--- reuse buffer object -local values_buf = buffer.new(64) - - local get_atc_context local get_atc_router local get_atc_fields @@ -129,67 +119,6 @@ do end -local is_empty_field -do - local null = ngx.null - local isempty = require("table.isempty") - - is_empty_field = function(f) - return f == nil or f == null or isempty(f) - end -end - - -local function escape_str(str) - -- raw string - if not str:find([["#]], 1, true) then - return "r#\"" .. str .. "\"#" - end - - -- standard string escaping (unlikely case) - if str:find([[\]], 1, true) then - str = str:gsub([[\]], [[\\]]) - end - - if str:find([["]], 1, true) then - str = str:gsub([["]], [[\"]]) - end - - return "\"" .. str .. "\"" -end - - -local function gen_for_field(name, op, vals, val_transform) - if is_empty_field(vals) then - return nil - end - - local vals_n = #vals - assert(vals_n > 0) - - values_buf:reset():put("(") - - for i = 1, vals_n do - local p = vals[i] - local op = (type(op) == "string") and op or op(p) - - if i > 1 then - values_buf:put(LOGICAL_OR) - end - - values_buf:putf("%s %s %s", name, op, - escape_str(val_transform and val_transform(op, p) or p)) - end - - -- consume the whole buffer - -- returns a local variable instead of using a tail call - -- to avoid NYI - local str = values_buf:put(")"):get() - - return str -end - - local function add_atc_matcher(inst, route, route_id, get_exp_and_priority, remove_existing) @@ -371,48 +300,6 @@ function _M.new(routes, cache, cache_neg, old_router, get_exp_and_priority) end --- split port in host, ignore form '[...]' --- example.com:123 => example.com, 123 --- example.*:123 => example.*, 123 -local split_host_port -do - local DEFAULT_HOSTS_LRUCACHE_SIZE = DEFAULT_MATCH_LRUCACHE_SIZE - - local memo_hp = lrucache.new(DEFAULT_HOSTS_LRUCACHE_SIZE) - - split_host_port = function(key) - if not key then - return nil, nil - end - - local m = memo_hp:get(key) - - if m then - return m[1], m[2] - end - - local p = key:find(":", nil, true) - if not p then - memo_hp:set(key, { key, nil }) - return key, nil - end - - local port = tonumber(key:sub(p + 1)) - - if not port then - memo_hp:set(key, { key, nil }) - return key, nil - end - - local host = key:sub(1, p - 1) - - memo_hp:set(key, { host, port }) - - return host, port - end -end - - local CACHE_PARAMS @@ -586,6 +473,7 @@ function _M:exec(ctx) return match_t end + else -- is stream subsystem @@ -708,16 +596,8 @@ function _M:exec(ctx) return match_t end -end -- if is_http - -_M.LOGICAL_OR = LOGICAL_OR -_M.LOGICAL_AND = LOGICAL_AND - -_M.escape_str = escape_str -_M.is_empty_field = is_empty_field -_M.gen_for_field = gen_for_field -_M.split_host_port = split_host_port +end -- if is_http return _M diff --git a/kong/router/compat.lua b/kong/router/compat.lua index df4285f21db..410168e8575 100644 --- a/kong/router/compat.lua +++ b/kong/router/compat.lua @@ -1,27 +1,21 @@ local _M = {} -local bit = require("bit") -local buffer = require("string.buffer") local atc = require("kong.router.atc") local utils = require("kong.router.utils") +local transform = require("kong.router.transform") local tb_new = require("table.new") local tb_nkeys = require("table.nkeys") local uuid = require("resty.jit-uuid") -local shallow_copy = require("kong.tools.utils").shallow_copy -local replace_dashes_lower = require("kong.tools.string").replace_dashes_lower +local shallow_copy = require("kong.tools.utils").shallow_copy local is_regex_magic = utils.is_regex_magic -local parse_ip_addr = utils.parse_ip_addr - - -local escape_str = atc.escape_str -local 
is_empty_field = atc.is_empty_field -local gen_for_field = atc.gen_for_field -local split_host_port = atc.split_host_port +local is_empty_field = transform.is_empty_field +local get_expression = transform.get_expression +local get_priority = transform.get_priority local type = type @@ -29,463 +23,17 @@ local pairs = pairs local ipairs = ipairs local assert = assert local tb_insert = table.insert -local byte = string.byte -local bor, band, lshift = bit.bor, bit.band, bit.lshift local is_http = ngx.config.subsystem == "http" -local DOT = byte(".") -local TILDE = byte("~") -local ASTERISK = byte("*") -local MAX_HEADER_COUNT = 255 - - --- reuse buffer objects -local expr_buf = buffer.new(128) -local hosts_buf = buffer.new(64) -local headers_buf = buffer.new(128) -local single_header_buf = buffer.new(64) - - --- sep: a seperator of expressions, like '&&' --- idx: indicate whether or not to add 'sep' --- for example, we should not add 'sep' for the first element in array -local function expression_append(buf, sep, str, idx) - if #buf > 0 and - (idx == nil or idx > 1) - then - buf:put(sep) - end - buf:put(str) -end - - -local OP_EQUAL = "==" -local OP_PREFIX = "^=" -local OP_POSTFIX = "=^" -local OP_REGEX = "~" -local OP_IN = "in" - - -local LOGICAL_OR = atc.LOGICAL_OR -local LOGICAL_AND = atc.LOGICAL_AND - - -- When splitting routes, we need to assign new UUIDs to the split routes. We use uuid v5 to generate them from -- the original route id and the path index so that incremental rebuilds see stable IDs for routes that have not -- changed. local uuid_generator = assert(uuid.factory_v5('7f145bf9-0dce-4f91-98eb-debbce4b9f6b')) -local function gen_for_nets(ip_field, port_field, vals) - if is_empty_field(vals) then - return nil - end - - local nets_buf = buffer.new(64):put("(") - - for i = 1, #vals do - local v = vals[i] - - if type(v) ~= "table" then - ngx.log(ngx.ERR, "sources/destinations elements must be a table") - return nil - end - - if is_empty_field(v) then - ngx.log(ngx.ERR, "sources/destinations elements must not be empty") - return nil - end - - local ip = v.ip - local port = v.port - - local exp_ip, exp_port - - if ip then - local addr, mask = parse_ip_addr(ip) - - if mask then -- ip in cidr - exp_ip = ip_field .. " " .. OP_IN .. " " .. - addr .. "/" .. mask - - else -- ip == addr - exp_ip = ip_field .. " " .. OP_EQUAL .. " " .. - addr - end - end - - if port then - exp_port = port_field .. " " .. OP_EQUAL .. " " .. port - end - - if not ip then - expression_append(nets_buf, LOGICAL_OR, exp_port, i) - goto continue - end - - if not port then - expression_append(nets_buf, LOGICAL_OR, exp_ip, i) - goto continue - end - - expression_append(nets_buf, LOGICAL_OR, - "(" .. exp_ip .. LOGICAL_AND .. exp_port .. 
")", i) - - ::continue:: - end -- for - - local str = nets_buf:put(")"):get() - - -- returns a local variable instead of using a tail call - -- to avoid NYI - return str -end - - -local function get_expression(route) - local methods = route.methods - local hosts = route.hosts - local paths = route.paths - local headers = route.headers - local snis = route.snis - - local srcs = route.sources - local dsts = route.destinations - - expr_buf:reset() - - local gen = gen_for_field("tls.sni", OP_EQUAL, snis, function(_, p) - if #p > 1 and byte(p, -1) == DOT then - -- last dot in FQDNs must not be used for routing - return p:sub(1, -2) - end - - return p - end) - if gen then - -- See #6425, if `net.protocol` is not `https` - -- then SNI matching should simply not be considered - if srcs or dsts then - gen = "(net.protocol != r#\"tls\"#" .. LOGICAL_OR .. gen .. ")" - else - gen = "(net.protocol != r#\"https\"#" .. LOGICAL_OR .. gen .. ")" - end - - expression_append(expr_buf, LOGICAL_AND, gen) - end - - -- stream expression - - do - local src_gen = gen_for_nets("net.src.ip", "net.src.port", srcs) - local dst_gen = gen_for_nets("net.dst.ip", "net.dst.port", dsts) - - if src_gen then - expression_append(expr_buf, LOGICAL_AND, src_gen) - end - - if dst_gen then - expression_append(expr_buf, LOGICAL_AND, dst_gen) - end - - if src_gen or dst_gen then - -- returns a local variable instead of using a tail call - -- to avoid NYI - local str = expr_buf:get() - return str - end - end - - -- http expression - - local gen = gen_for_field("http.method", OP_EQUAL, methods) - if gen then - expression_append(expr_buf, LOGICAL_AND, gen) - end - - if not is_empty_field(hosts) then - hosts_buf:reset():put("(") - - for i, h in ipairs(hosts) do - local host, port = split_host_port(h) - - local op = OP_EQUAL - if byte(host) == ASTERISK then - -- postfix matching - op = OP_POSTFIX - host = host:sub(2) - - elseif byte(host, -1) == ASTERISK then - -- prefix matching - op = OP_PREFIX - host = host:sub(1, -2) - end - - local exp = "http.host ".. op .. " r#\"" .. host .. "\"#" - if port then - exp = "(" .. exp .. LOGICAL_AND .. - "net.dst.port ".. OP_EQUAL .. " " .. port .. ")" - end - expression_append(hosts_buf, LOGICAL_OR, exp, i) - end -- for route.hosts - - expression_append(expr_buf, LOGICAL_AND, - hosts_buf:put(")"):get()) - end - - gen = gen_for_field("http.path", function(path) - return is_regex_magic(path) and OP_REGEX or OP_PREFIX - end, paths, function(op, p) - if op == OP_REGEX then - -- 1. strip leading `~` - -- 2. prefix with `^` to match the anchored behavior of the traditional router - -- 3. update named capture opening tag for rust regex::Regex compatibility - return "^" .. p:sub(2):gsub("?<", "?P<") - end - - return p - end) - if gen then - expression_append(expr_buf, LOGICAL_AND, gen) - end - - if not is_empty_field(headers) then - headers_buf:reset() - - for h, v in pairs(headers) do - single_header_buf:reset():put("(") - - for i, value in ipairs(v) do - local name = "any(lower(http.headers." .. replace_dashes_lower(h) .. "))" - local op = OP_EQUAL - - -- value starts with "~*" - if byte(value, 1) == TILDE and byte(value, 2) == ASTERISK then - value = value:sub(3) - op = OP_REGEX - end - - expression_append(single_header_buf, LOGICAL_OR, - name .. " " .. op .. " " .. 
escape_str(value:lower()), i) - end - - expression_append(headers_buf, LOGICAL_AND, - single_header_buf:put(")"):get()) - end - - expression_append(expr_buf, LOGICAL_AND, headers_buf:get()) - end - - local str = expr_buf:get() - - -- returns a local variable instead of using a tail call - -- to avoid NYI - return str -end - - -local lshift_uint64 -do - local ffi = require("ffi") - local ffi_uint = ffi.new("uint64_t") - - lshift_uint64 = function(v, offset) - ffi_uint = v - return lshift(ffi_uint, offset) - end -end - - -local stream_get_priority -do - -- compatible with http priority - local STREAM_SNI_BIT = lshift_uint64(0x01ULL, 61) - - -- IP > PORT > CIDR - local IP_BIT = lshift_uint64(0x01ULL, 3) - local PORT_BIT = lshift_uint64(0x01ULL, 2) - local CIDR_BIT = lshift_uint64(0x01ULL, 0) - - local function calc_ip_weight(ips) - local weight = 0x0ULL - - if is_empty_field(ips) then - return weight - end - - for i = 1, #ips do - local ip = ips[i].ip - local port = ips[i].port - - if ip then - if ip:find("/", 1, true) then - weight = bor(weight, CIDR_BIT) - - else - weight = bor(weight, IP_BIT) - end - end - - if port then - weight = bor(weight, PORT_BIT) - end - end - - return weight - end - - stream_get_priority = function(snis, srcs, dsts) - local match_weight = 0x0ULL - - -- [sni] has higher priority than [src] or [dst] - if not is_empty_field(snis) then - match_weight = STREAM_SNI_BIT - end - - -- [src] + [dst] has higher priority than [sni] - if not is_empty_field(srcs) and - not is_empty_field(dsts) - then - match_weight = STREAM_SNI_BIT - end - - local src_bits = calc_ip_weight(srcs) - local dst_bits = calc_ip_weight(dsts) - - local priority = bor(match_weight, - lshift(src_bits, 4), - dst_bits) - - return priority - end -end - - -local PLAIN_HOST_ONLY_BIT = lshift_uint64(0x01ULL, 60) -local REGEX_URL_BIT = lshift_uint64(0x01ULL, 51) - - --- convert a route to a priority value for use in the ATC router --- priority must be a 64-bit non negative integer --- format (big endian): --- 0 1 2 3 --- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 --- +-----+-+---------------+-+-------------------------------------+ --- | W |P| Header |R| Regex | --- | G |L| |G| Priority | --- | T |N| Count |X| | --- +-----+-+-----------------+-------------------------------------+ --- | Regex Priority | Max Length | --- | (cont) | | --- | | | --- +-------------------------+-------------------------------------+ -local function get_priority(route) - local snis = route.snis - local srcs = route.sources - local dsts = route.destinations - - -- stream expression - - if not is_empty_field(srcs) or - not is_empty_field(dsts) - then - return stream_get_priority(snis, srcs, dsts) - end - - -- http expression - - local methods = route.methods - local hosts = route.hosts - local paths = route.paths - local headers = route.headers - - local match_weight = 0 -- 0x0ULL - - if not is_empty_field(methods) then - match_weight = match_weight + 1 - end - - if not is_empty_field(hosts) then - match_weight = match_weight + 1 - end - - local headers_count = is_empty_field(headers) and 0 or tb_nkeys(headers) - - if headers_count > 0 then - match_weight = match_weight + 1 - - if headers_count > MAX_HEADER_COUNT then - ngx.log(ngx.WARN, "too many headers in route ", route.id, - " headers count capped at ", MAX_HEADER_COUNT, - " when sorting") - headers_count = MAX_HEADER_COUNT - end - end - - if not is_empty_field(snis) then - match_weight = match_weight + 1 - end - - local plain_host_only = type(hosts) == "table" 
- - if plain_host_only then - for _, h in ipairs(hosts) do - if h:find("*", nil, true) then - plain_host_only = false - break - end - end - end - - local uri_length = 0 - local regex_url = false - - if not is_empty_field(paths) then - match_weight = match_weight + 1 - - local p = paths[1] - - if is_regex_magic(p) then - regex_url = true - - else - uri_length = #p - end - - for i = 2, #paths do - p = paths[i] - - if regex_url then - assert(is_regex_magic(p), - "cannot mix regex and non-regex paths in get_priority()") - - else - assert(#p == uri_length, - "cannot mix different length prefixes in get_priority()") - end - end - end - - local match_weight = lshift_uint64(match_weight, 61) - local headers_count = lshift_uint64(headers_count, 52) - - local regex_priority = lshift_uint64(regex_url and route.regex_priority or 0, 19) - local max_length = band(uri_length, 0x7FFFF) - - local priority = bor(match_weight, - plain_host_only and PLAIN_HOST_ONLY_BIT or 0, - regex_url and REGEX_URL_BIT or 0, - headers_count, - regex_priority, - max_length) - - return priority -end - - local function get_exp_and_priority(route) if route.expression then ngx.log(ngx.ERR, "expecting a traditional route while it's not (probably an expressions route). ", diff --git a/kong/router/expressions.lua b/kong/router/expressions.lua index 129689f1313..733aaeb88c6 100644 --- a/kong/router/expressions.lua +++ b/kong/router/expressions.lua @@ -5,15 +5,16 @@ local re_gsub = ngx.re.gsub local atc = require("kong.router.atc") -local gen_for_field = atc.gen_for_field +local transform = require("kong.router.transform") -local OP_EQUAL = "==" -local NET_PORT_REG = [[(net\.port)(\s*)([=> example.com, 123 +-- example.*:123 => example.*, 123 +local split_host_port +do + local tonumber = tonumber + + local DEFAULT_HOSTS_LRUCACHE_SIZE = utils.DEFAULT_MATCH_LRUCACHE_SIZE + + local memo_hp = lrucache.new(DEFAULT_HOSTS_LRUCACHE_SIZE) + + split_host_port = function(key) + if not key then + return nil, nil + end + + local m = memo_hp:get(key) + + if m then + return m[1], m[2] + end + + local p = key:find(":", nil, true) + if not p then + memo_hp:set(key, { key, nil }) + return key, nil + end + + local port = tonumber(key:sub(p + 1)) + + if not port then + memo_hp:set(key, { key, nil }) + return key, nil + end + + local host = key:sub(1, p - 1) + + memo_hp:set(key, { host, port }) + + return host, port + end +end + + +local LOGICAL_OR = " || " +local LOGICAL_AND = " && " + + +local OP_EQUAL = "==" +local OP_PREFIX = "^=" +local OP_POSTFIX = "=^" +local OP_REGEX = "~" +local OP_IN = "in" + + +local DOT = byte(".") +local TILDE = byte("~") +local ASTERISK = byte("*") + + +-- reuse buffer objects +local values_buf = buffer.new(64) +local nets_buf = buffer.new(64) +local expr_buf = buffer.new(64) +local hosts_buf = buffer.new(64) +local headers_buf = buffer.new(64) +local single_header_buf = buffer.new(64) + + +-- sep: a seperator of expressions, like '&&' +-- idx: indicate whether or not to add 'sep' +-- for example, we should not add 'sep' for the first element in array +local function expression_append(buf, sep, str, idx) + if #buf > 0 and + (idx == nil or idx > 1) + then + buf:put(sep) + end + buf:put(str) +end + + +local function gen_for_field(name, op, vals, val_transform) + if is_empty_field(vals) then + return nil + end + + local vals_n = #vals + assert(vals_n > 0) + + values_buf:reset():put("(") + + for i = 1, vals_n do + local p = vals[i] + local op = (type(op) == "string") and op or op(p) + + local expr = fmt("%s %s %s", name, op, + 
escape_str(val_transform and val_transform(op, p) or p)) + + expression_append(values_buf, LOGICAL_OR, expr, i) + end + + -- consume the whole buffer + -- returns a local variable instead of using a tail call + -- to avoid NYI + local str = values_buf:put(")"):get() + + return str +end + + +local function parse_ip_addr(ip) + local addr, mask = ipmatcher.split_ip(ip) + + if not mask then + return addr + end + + local ipv4 = ipmatcher.parse_ipv4(addr) + + -- FIXME: support ipv6 + if not ipv4 then + return addr, mask + end + + local cidr = lshift(rshift(ipv4, 32 - mask), 32 - mask) + + local n1 = band( cidr , 0xff) + local n2 = band(rshift(cidr, 8), 0xff) + local n3 = band(rshift(cidr, 16), 0xff) + local n4 = band(rshift(cidr, 24), 0xff) + + return n4 .. "." .. n3 .. "." .. n2 .. "." .. n1, mask +end + + +local function gen_for_nets(ip_field, port_field, vals) + if is_empty_field(vals) then + return nil + end + + nets_buf:reset():put("(") + + for i = 1, #vals do + local v = vals[i] + + if type(v) ~= "table" then + ngx.log(ngx.ERR, "sources/destinations elements must be a table") + return nil + end + + if is_empty_field(v) then + ngx.log(ngx.ERR, "sources/destinations elements must not be empty") + return nil + end + + local ip = v.ip + local port = v.port + + local exp_ip, exp_port + + if not is_null(ip) then + local addr, mask = parse_ip_addr(ip) + + if mask then -- ip in cidr + exp_ip = ip_field .. " " .. OP_IN .. " " .. + addr .. "/" .. mask + + else -- ip == addr + exp_ip = ip_field .. " " .. OP_EQUAL .. " " .. + addr + end + end + + if not is_null(port) then + exp_port = port_field .. " " .. OP_EQUAL .. " " .. port + end + + -- only add port expression + if is_null(ip) then + expression_append(nets_buf, LOGICAL_OR, exp_port, i) + goto continue + end + + -- only add ip address expression + if is_null(port) then + expression_append(nets_buf, LOGICAL_OR, exp_ip, i) + goto continue + end + + -- add port and ip address expression with '()' + expression_append(nets_buf, LOGICAL_OR, + "(" .. exp_ip .. LOGICAL_AND .. exp_port .. ")", i) + + ::continue:: + end -- for + + local str = nets_buf:put(")"):get() + + -- returns a local variable instead of using a tail call + -- to avoid NYI + return str +end + + +local is_stream_route +do + local is_stream_protocol = { + tcp = true, + udp = true, + tls = true, + tls_passthrough = true, + } + + is_stream_route = function(r) + if not r.protocols then + return false + end + + return is_stream_protocol[r.protocols[1]] + end +end + + +local function get_expression(route) + local methods = route.methods + local hosts = route.hosts + local paths = route.paths + local headers = route.headers + local snis = route.snis + + local srcs = route.sources + local dsts = route.destinations + + expr_buf:reset() + + local gen = gen_for_field("tls.sni", OP_EQUAL, snis, function(_, p) + if #p > 1 and byte(p, -1) == DOT then + -- last dot in FQDNs must not be used for routing + return p:sub(1, -2) + end + + return p + end) + if gen then + -- See #6425, if `net.protocol` is not `https` + -- then SNI matching should simply not be considered + if is_stream_route(route) then + gen = "(net.protocol != r#\"tls\"#" .. LOGICAL_OR .. gen .. ")" + else + gen = "(net.protocol != r#\"https\"#" .. LOGICAL_OR .. gen .. 
")" + end + + expression_append(expr_buf, LOGICAL_AND, gen) + end + + -- now http route support net.src.* and net.dst.* + + local src_gen = gen_for_nets("net.src.ip", "net.src.port", srcs) + local dst_gen = gen_for_nets("net.dst.ip", "net.dst.port", dsts) + + if src_gen then + expression_append(expr_buf, LOGICAL_AND, src_gen) + end + + if dst_gen then + expression_append(expr_buf, LOGICAL_AND, dst_gen) + end + + -- stream expression, protocol = tcp/udp/tls/tls_passthrough + + if is_stream_route(route) then + -- returns a local variable instead of using a tail call + -- to avoid NYI + local str = expr_buf:get() + return str + end + + -- http expression, protocol = http/https/grpc/grpcs + + local gen = gen_for_field("http.method", OP_EQUAL, methods) + if gen then + expression_append(expr_buf, LOGICAL_AND, gen) + end + + if not is_empty_field(hosts) then + hosts_buf:reset():put("(") + + for i, h in ipairs(hosts) do + local host, port = split_host_port(h) + + local op = OP_EQUAL + if byte(host) == ASTERISK then + -- postfix matching + op = OP_POSTFIX + host = host:sub(2) + + elseif byte(host, -1) == ASTERISK then + -- prefix matching + op = OP_PREFIX + host = host:sub(1, -2) + end + + local exp = "http.host ".. op .. " r#\"" .. host .. "\"#" + if port then + exp = "(" .. exp .. LOGICAL_AND .. + "net.dst.port ".. OP_EQUAL .. " " .. port .. ")" + end + expression_append(hosts_buf, LOGICAL_OR, exp, i) + end -- for route.hosts + + expression_append(expr_buf, LOGICAL_AND, + hosts_buf:put(")"):get()) + end + + gen = gen_for_field("http.path", function(path) + return is_regex_magic(path) and OP_REGEX or OP_PREFIX + end, paths, function(op, p) + if op == OP_REGEX then + -- 1. strip leading `~` + -- 2. prefix with `^` to match the anchored behavior of the traditional router + -- 3. update named capture opening tag for rust regex::Regex compatibility + return "^" .. p:sub(2):gsub("?<", "?P<") + end + + return p + end) + if gen then + expression_append(expr_buf, LOGICAL_AND, gen) + end + + if not is_empty_field(headers) then + headers_buf:reset() + + for h, v in pairs(headers) do + single_header_buf:reset():put("(") + + for i, value in ipairs(v) do + local name = "any(lower(http.headers." .. replace_dashes_lower(h) .. "))" + local op = OP_EQUAL + + -- value starts with "~*" + if byte(value, 1) == TILDE and byte(value, 2) == ASTERISK then + value = value:sub(3) + op = OP_REGEX + end + + expression_append(single_header_buf, LOGICAL_OR, + name .. " " .. op .. " " .. 
escape_str(value:lower()), i) + end + + expression_append(headers_buf, LOGICAL_AND, + single_header_buf:put(")"):get()) + end + + expression_append(expr_buf, LOGICAL_AND, headers_buf:get()) + end + + local str = expr_buf:get() + + -- returns a local variable instead of using a tail call + -- to avoid NYI + return str +end + + +local lshift_uint64 +do + local ffi = require("ffi") + local ffi_uint = ffi.new("uint64_t") + + lshift_uint64 = function(v, offset) + ffi_uint = v + return lshift(ffi_uint, offset) + end +end + + +local stream_get_priority +do + -- compatible with http priority + local STREAM_SNI_BIT = lshift_uint64(0x01ULL, 61) + + -- IP > PORT > CIDR + local IP_BIT = lshift_uint64(0x01ULL, 3) + local PORT_BIT = lshift_uint64(0x01ULL, 2) + local CIDR_BIT = lshift_uint64(0x01ULL, 0) + + local function calc_ip_weight(ips) + local weight = 0x0ULL + + if is_empty_field(ips) then + return weight + end + + for i = 1, #ips do + local ip = ips[i].ip + local port = ips[i].port + + if not is_null(ip) then + if ip:find("/", 1, true) then + weight = bor(weight, CIDR_BIT) + + else + weight = bor(weight, IP_BIT) + end + end + + if not is_null(port) then + weight = bor(weight, PORT_BIT) + end + end + + return weight + end + + stream_get_priority = function(snis, srcs, dsts) + local match_weight = 0x0ULL + + -- [sni] has higher priority than [src] or [dst] + if not is_empty_field(snis) then + match_weight = STREAM_SNI_BIT + end + + -- [src] + [dst] has higher priority than [sni] + if not is_empty_field(srcs) and + not is_empty_field(dsts) + then + match_weight = STREAM_SNI_BIT + end + + local src_bits = calc_ip_weight(srcs) + local dst_bits = calc_ip_weight(dsts) + + local priority = bor(match_weight, + lshift_uint64(src_bits, 4), + dst_bits) + + return priority + end +end + + +local MAX_HEADER_COUNT = 255 + + +local PLAIN_HOST_ONLY_BIT = lshift_uint64(0x01ULL, 60) +local REGEX_URL_BIT = lshift_uint64(0x01ULL, 51) + + +-- convert a route to a priority value for use in the ATC router +-- priority must be a 64-bit non negative integer +-- format (big endian): +-- 0 1 2 3 +-- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-- +-----+-+---------------+-+-------------------------------------+ +-- | W |P| Header |R| Regex | +-- | G |L| |G| Priority | +-- | T |N| Count |X| | +-- +-----+-+-----------------+-------------------------------------+ +-- | Regex Priority | Max Length | +-- | (cont) | | +-- | | | +-- +-------------------------+-------------------------------------+ +local function get_priority(route) + local snis = route.snis + local srcs = route.sources + local dsts = route.destinations + + -- stream expression + + if not is_empty_field(srcs) or + not is_empty_field(dsts) + then + return stream_get_priority(snis, srcs, dsts) + end + + -- http expression + + local methods = route.methods + local hosts = route.hosts + local paths = route.paths + local headers = route.headers + + local match_weight = 0 -- 0x0ULL + + if not is_empty_field(methods) then + match_weight = match_weight + 1 + end + + if not is_empty_field(hosts) then + match_weight = match_weight + 1 + end + + local headers_count = is_empty_field(headers) and 0 or tb_nkeys(headers) + + if headers_count > 0 then + match_weight = match_weight + 1 + + if headers_count > MAX_HEADER_COUNT then + ngx.log(ngx.WARN, "too many headers in route ", route.id, + " headers count capped at ", MAX_HEADER_COUNT, + " when sorting") + headers_count = MAX_HEADER_COUNT + end + end + + if not is_empty_field(snis) then + match_weight = 
match_weight + 1 + end + + local plain_host_only = type(hosts) == "table" + + if plain_host_only then + for _, h in ipairs(hosts) do + if h:find("*", nil, true) then + plain_host_only = false + break + end + end + end + + local uri_length = 0 + local regex_url = false + + if not is_empty_field(paths) then + match_weight = match_weight + 1 + + local p = paths[1] + + if is_regex_magic(p) then + regex_url = true + + else + uri_length = #p + end + + for i = 2, #paths do + p = paths[i] + + if regex_url then + assert(is_regex_magic(p), + "cannot mix regex and non-regex paths in get_priority()") + + else + assert(#p == uri_length, + "cannot mix different length prefixes in get_priority()") + end + end + end + + local match_weight = lshift_uint64(match_weight, 61) + local headers_count = lshift_uint64(headers_count, 52) + + local regex_priority = lshift_uint64(regex_url and route.regex_priority or 0, 19) + local max_length = band(uri_length, 0x7FFFF) + + local priority = bor(match_weight, + plain_host_only and PLAIN_HOST_ONLY_BIT or 0, + regex_url and REGEX_URL_BIT or 0, + headers_count, + regex_priority, + max_length) + + return priority +end + + +return { + OP_EQUAL = OP_EQUAL, + + LOGICAL_OR = LOGICAL_OR, + LOGICAL_AND = LOGICAL_AND, + + split_host_port = split_host_port, + + is_empty_field = is_empty_field, + gen_for_field = gen_for_field, + + get_expression = get_expression, + get_priority = get_priority, +} + diff --git a/kong/router/utils.lua b/kong/router/utils.lua index a70eb5077c9..5c3af208673 100644 --- a/kong/router/utils.lua +++ b/kong/router/utils.lua @@ -389,39 +389,6 @@ do end -local parse_ip_addr -do - local bit = require("bit") - local ipmatcher = require("resty.ipmatcher") - - local band, lshift, rshift = bit.band, bit.lshift, bit.rshift - - parse_ip_addr = function(ip) - local addr, mask = ipmatcher.split_ip(ip) - - if not mask then - return addr - end - - local ipv4 = ipmatcher.parse_ipv4(addr) - - -- FIXME: support ipv6 - if not ipv4 then - return addr, mask - end - - local cidr = lshift(rshift(ipv4, 32 - mask), 32 - mask) - - local n1 = band( cidr , 0xff) - local n2 = band(rshift(cidr, 8), 0xff) - local n3 = band(rshift(cidr, 16), 0xff) - local n4 = band(rshift(cidr, 24), 0xff) - - return n4 .. "." .. n3 .. "." .. n2 .. "." .. n1, mask - end -end - - return { DEFAULT_MATCH_LRUCACHE_SIZE = DEFAULT_MATCH_LRUCACHE_SIZE, @@ -435,6 +402,4 @@ return { route_match_stat = route_match_stat, is_regex_magic = is_regex_magic, phonehome_statistics = phonehome_statistics, - - parse_ip_addr = parse_ip_addr, } diff --git a/spec/01-unit/08-router_spec.lua b/spec/01-unit/08-router_spec.lua index f209586c895..3078c907f82 100644 --- a/spec/01-unit/08-router_spec.lua +++ b/spec/01-unit/08-router_spec.lua @@ -4699,6 +4699,7 @@ for _, flavor in ipairs({ "traditional", "traditional_compatible", "expressions" service = service, route = { id = "e8fb37f1-102d-461e-9c51-6608a6bb8101", + protocols = { "tls" }, snis = { "www.example.org" }, sources = { { ip = "127.0.0.1" },