From 358fff3869ce330880827a947db8bac3b5066215 Mon Sep 17 00:00:00 2001 From: Michael Martin Date: Wed, 11 Dec 2024 11:44:50 -0800 Subject: [PATCH 01/18] tests(*): shutdown timerng instance after test completion (#14005) --- .busted | 1 + kong-3.10.0-0.rockspec | 1 + kong/cmd/init.lua | 11 ++----- kong/cmd/utils/timer.lua | 18 +++++++++++ spec/busted-ci-helper.lua | 67 +++++++++++++++++++++++---------------- 5 files changed, 63 insertions(+), 35 deletions(-) create mode 100644 kong/cmd/utils/timer.lua diff --git a/.busted b/.busted index 57e2f9eabc2f..706d058cbb34 100644 --- a/.busted +++ b/.busted @@ -3,5 +3,6 @@ return { lpath = "./?.lua;./?/init.lua;", -- make setup() and teardown() behave like their lazy_ variants lazy = true, + helper = "./spec/busted-ci-helper.lua", } } diff --git a/kong-3.10.0-0.rockspec b/kong-3.10.0-0.rockspec index e8eac2999739..2ba7c9e54f2c 100644 --- a/kong-3.10.0-0.rockspec +++ b/kong-3.10.0-0.rockspec @@ -155,6 +155,7 @@ build = { ["kong.cmd.utils.prefix_handler"] = "kong/cmd/utils/prefix_handler.lua", ["kong.cmd.utils.process_secrets"] = "kong/cmd/utils/process_secrets.lua", ["kong.cmd.utils.inject_confs"] = "kong/cmd/utils/inject_confs.lua", + ["kong.cmd.utils.timer"] = "kong/cmd/utils/timer.lua", ["kong.api"] = "kong/api/init.lua", ["kong.api.api_helpers"] = "kong/api/api_helpers.lua", diff --git a/kong/cmd/init.lua b/kong/cmd/init.lua index 609d8c6f6cfc..28fe22416348 100644 --- a/kong/cmd/init.lua +++ b/kong/cmd/init.lua @@ -4,13 +4,7 @@ math.randomseed() -- Generate PRNG seed local pl_app = require "pl.lapp" local log = require "kong.cmd.utils.log" - -local function stop_timers() - -- shutdown lua-resty-timer-ng to allow the nginx worker to stop quickly - if _G.timerng then - _G.timerng:destroy() - end -end +local timer = require "kong.cmd.utils.timer" return function(cmd_name, args) local cmd = require("kong.cmd." .. 
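-- Illustrative aside (not part of the diff): kong.cmd.utils.timer replaces the
-- inline stop_timers() helper removed above, giving the CLI below and the
-- busted helper one shared shutdown path, roughly:
--
--   local timer = require "kong.cmd.utils.timer"
--   -- ... run the sub-command ...
--   timer.shutdown()  -- destroy any lua-resty-timer-ng instance before exit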
cmd_name) @@ -42,5 +36,6 @@ return function(cmd_name, args) pl_app.quit(nil, true) end) - stop_timers() + -- shutdown lua-resty-timer-ng to allow the nginx worker to stop quickly + timer.shutdown() end diff --git a/kong/cmd/utils/timer.lua b/kong/cmd/utils/timer.lua new file mode 100644 index 000000000000..511bbaf4cc37 --- /dev/null +++ b/kong/cmd/utils/timer.lua @@ -0,0 +1,18 @@ +local _M = {} + +function _M.shutdown() + if _G.timerng then + pcall(_G.timerng.destroy, _G.timerng) + end + + -- kong.init_worker() stashes the timerng instance within the kong global and + -- removes the _G.timerng reference, so check there too + if _G.kong and _G.kong.timer and _G.kong.timer ~= _G.timerng then + pcall(_G.kong.timer.destroy, _G.kong.timer) + _G.kong.timer = nil + end + + _G.timerng = nil +end + +return _M diff --git a/spec/busted-ci-helper.lua b/spec/busted-ci-helper.lua index 1e9526a81179..2260138b890d 100644 --- a/spec/busted-ci-helper.lua +++ b/spec/busted-ci-helper.lua @@ -1,43 +1,55 @@ -- busted-ci-helper.lua +local busted = require 'busted' +do + local shutdown_timers = require("kong.cmd.utils.timer").shutdown + assert(type(shutdown_timers) == "function") --- needed before requiring 'socket.unix' -require 'socket' + -- shutdown lua-resty-timer-ng to allow the nginx worker to stop quickly + busted.subscribe({ 'exit' }, function() + shutdown_timers() -local busted = require 'busted' -local cjson = require 'cjson' -local socket_unix = require 'socket.unix' + -- second return value must be `true`, or else all other callbacks for this + -- event will be skipped + return nil, true + end) +end -local busted_event_path = os.getenv("BUSTED_EVENT_PATH") +local BUSTED_EVENT_PATH = os.getenv("BUSTED_EVENT_PATH") +if BUSTED_EVENT_PATH then + -- needed before requiring 'socket.unix' + require 'socket' --- Function to recursively copy a table, skipping keys associated with functions -local function copyTable(original, copied, cache, max_depth, current_depth) - copied = copied or {} - cache = cache or {} - max_depth = max_depth or 5 - current_depth = current_depth or 1 + local cjson = require 'cjson' + local socket_unix = require 'socket.unix' - if cache[original] then return cache[original] end - cache[original] = copied + -- Function to recursively copy a table, skipping keys associated with functions + local function copyTable(original, copied, cache, max_depth, current_depth) + copied = copied or {} + cache = cache or {} + max_depth = max_depth or 5 + current_depth = current_depth or 1 - for key, value in pairs(original) do - if type(value) == "table" then - if current_depth < max_depth then - copied[key] = copyTable(value, {}, cache, max_depth, current_depth + 1) + if cache[original] then return cache[original] end + cache[original] = copied + + for key, value in pairs(original) do + if type(value) == "table" then + if current_depth < max_depth then + copied[key] = copyTable(value, {}, cache, max_depth, current_depth + 1) + end + elseif type(value) == "userdata" then + copied[key] = tostring(value) + elseif type(value) ~= "function" then + copied[key] = value end - elseif type(value) == "userdata" then - copied[key] = tostring(value) - elseif type(value) ~= "function" then - copied[key] = value end - end - return copied -end + return copied + end -if busted_event_path then local sock = assert(socket_unix()) - assert(sock:connect(busted_event_path)) + assert(sock:connect(BUSTED_EVENT_PATH)) local events = {{ 'suite', 'reset' }, { 'suite', 'start' }, @@ -51,6 +63,7 @@ if busted_event_path then { 
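-- Illustrative aside (not part of the diff): each entry below is a busted
-- event channel (name plus optional qualifier). The subscriber registered
-- underneath copies every event argument with copyTable() above, so functions
-- are dropped, userdata is stringified, and recursion stops at depth 5,
-- keeping each payload cjson-encodable for the BUSTED_EVENT_PATH socket.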
'error', 'it' }, { 'failure' }, { 'error' }} + for _, event in ipairs(events) do busted.subscribe(event, function (...) local args = {} From 2af3a76bb161fa640ae33d5d909b692c423b8612 Mon Sep 17 00:00:00 2001 From: Aapo Talvensaari Date: Fri, 13 Dec 2024 17:56:26 +0200 Subject: [PATCH 02/18] refactor(sandbox): improve sandbox (#10900) Signed-off-by: Aapo Talvensaari --- .luacheckrc | 2 +- kong-3.10.0-0.rockspec | 12 +- kong.conf.default | 10 +- kong/db/init.lua | 5 +- kong/db/migrations/state.lua | 2 +- kong/db/strategies/postgres/connector.lua | 2 +- kong/plugins/file-log/handler.lua | 5 +- kong/plugins/http-log/handler.lua | 4 +- kong/plugins/loggly/handler.lua | 5 +- kong/plugins/pre-function/_handler.lua | 19 +- kong/plugins/syslog/handler.lua | 5 +- kong/plugins/tcp-log/handler.lua | 5 +- kong/plugins/udp-log/handler.lua | 5 +- kong/tools/kong-lua-sandbox.lua | 181 +--------- kong/tools/sandbox.lua | 205 ----------- kong/tools/sandbox/environment/handler.lua | 199 +++++++++++ kong/tools/sandbox/environment/init.lua | 185 ++++++++++ kong/tools/sandbox/environment/lua.lua | 39 ++ kong/tools/sandbox/environment/schema.lua | 10 + kong/tools/sandbox/init.lua | 334 ++++++++++++++++++ kong/tools/sandbox/kong.lua | 125 +++++++ kong/tools/sandbox/require/handler.lua | 62 ++++ kong/tools/sandbox/require/init.lua | 48 +++ kong/tools/sandbox/require/lua.lua | 15 + kong/tools/sandbox/require/schema.lua | 13 + spec/01-unit/20-sandbox_spec.lua | 4 +- spec/03-plugins/18-acl/02-access_spec.lua | 12 +- .../36-request-transformer/02-access_spec.lua | 8 +- 28 files changed, 1071 insertions(+), 450 deletions(-) delete mode 100644 kong/tools/sandbox.lua create mode 100644 kong/tools/sandbox/environment/handler.lua create mode 100644 kong/tools/sandbox/environment/init.lua create mode 100644 kong/tools/sandbox/environment/lua.lua create mode 100644 kong/tools/sandbox/environment/schema.lua create mode 100644 kong/tools/sandbox/init.lua create mode 100644 kong/tools/sandbox/kong.lua create mode 100644 kong/tools/sandbox/require/handler.lua create mode 100644 kong/tools/sandbox/require/init.lua create mode 100644 kong/tools/sandbox/require/lua.lua create mode 100644 kong/tools/sandbox/require/schema.lua diff --git a/.luacheckrc b/.luacheckrc index 6bb537398eff..03e0b7c5a0b8 100644 --- a/.luacheckrc +++ b/.luacheckrc @@ -30,7 +30,7 @@ exclude_files = { "bazel-kong", } -files["kong/tools/kong-lua-sandbox.lua"] = { +files["kong/tools/sandbox/kong.lua"] = { read_globals = { "_ENV", "table.pack", diff --git a/kong-3.10.0-0.rockspec b/kong-3.10.0-0.rockspec index 2ba7c9e54f2c..22e19b4fefe9 100644 --- a/kong-3.10.0-0.rockspec +++ b/kong-3.10.0-0.rockspec @@ -194,7 +194,6 @@ build = { ["kong.tools.stream_api"] = "kong/tools/stream_api.lua", ["kong.tools.queue"] = "kong/tools/queue.lua", ["kong.tools.queue_schema"] = "kong/tools/queue_schema.lua", - ["kong.tools.sandbox"] = "kong/tools/sandbox.lua", ["kong.tools.uri"] = "kong/tools/uri.lua", ["kong.tools.kong-lua-sandbox"] = "kong/tools/kong-lua-sandbox.lua", ["kong.tools.protobuf"] = "kong/tools/protobuf.lua", @@ -217,6 +216,17 @@ build = { ["kong.tools.redis.schema"] = "kong/tools/redis/schema.lua", ["kong.tools.aws_stream"] = "kong/tools/aws_stream.lua", + ["kong.tools.sandbox"] = "kong/tools/sandbox/init.lua", + ["kong.tools.sandbox.kong"] = "kong/tools/sandbox/kong.lua", + ["kong.tools.sandbox.environment"] = "kong/tools/sandbox/environment/init.lua", + ["kong.tools.sandbox.environment.handler"] = "kong/tools/sandbox/environment/handler.lua", + 
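-- Compatibility note (not part of the diff): "kong.tools.sandbox" now resolves
-- to sandbox/init.lua, so existing require("kong.tools.sandbox") call sites
-- keep working; kong/tools/kong-lua-sandbox.lua likewise survives only as a
-- stub that forwards to kong.tools.sandbox.kong (see the hunk further below).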
["kong.tools.sandbox.environment.lua"] = "kong/tools/sandbox/environment/lua.lua", + ["kong.tools.sandbox.environment.schema"] = "kong/tools/sandbox/environment/schema.lua", + ["kong.tools.sandbox.require"] = "kong/tools/sandbox/require/init.lua", + ["kong.tools.sandbox.require.handler"] = "kong/tools/sandbox/require/handler.lua", + ["kong.tools.sandbox.require.lua"] = "kong/tools/sandbox/require/lua.lua", + ["kong.tools.sandbox.require.schema"] = "kong/tools/sandbox/require/schema.lua", + ["kong.runloop.handler"] = "kong/runloop/handler.lua", ["kong.runloop.events"] = "kong/runloop/events.lua", ["kong.runloop.log_level"] = "kong/runloop/log_level.lua", diff --git a/kong.conf.default b/kong.conf.default index 9721e0d6449b..c711c98e11a6 100644 --- a/kong.conf.default +++ b/kong.conf.default @@ -1894,9 +1894,10 @@ # them. The sandboxed function has # restricted access to the global # environment and only has access - # to standard Lua functions that - # will generally not cause harm to - # the Kong Gateway node. + # to Kong PDK, OpenResty, and + # standard Lua functions that will + # generally not cause harm to the + # Kong Gateway node. # # * `on`: Functions have unrestricted # access to the global environment and @@ -1920,9 +1921,6 @@ # functions are not allowed, like: # `os.execute('rm -rf /*')`. # - # For a full allowed/disallowed list, see: - # https://github.com/kikito/sandbox.lua/blob/master/sandbox.lua - # # To customize the sandbox environment, use # the `untrusted_lua_sandbox_requires` and # `untrusted_lua_sandbox_environment` diff --git a/kong/db/init.lua b/kong/db/init.lua index edf44f2ac46d..78df157d82e1 100644 --- a/kong/db/init.lua +++ b/kong/db/init.lua @@ -102,7 +102,7 @@ function DB.new(kong_config, strategy) strategy = strategy, errors = errors, infos = connector:infos(), - kong_config = kong_config, + loaded_plugins = kong_config.loaded_plugins, -- left for MigrationsState.load } do @@ -444,8 +444,7 @@ do return nil, prefix_err(self, err) end - local ok, err = self.connector:schema_bootstrap(self.kong_config, - DEFAULT_LOCKS_TTL) + local ok, err = self.connector:schema_bootstrap(DEFAULT_LOCKS_TTL) self.connector:close() diff --git a/kong/db/migrations/state.lua b/kong/db/migrations/state.lua index a703a1fc1b38..ec22d9b9c617 100644 --- a/kong/db/migrations/state.lua +++ b/kong/db/migrations/state.lua @@ -184,7 +184,7 @@ function State.load(db) log.debug("loading subsystems migrations...") - local subsystems, err = load_subsystems(db, db.kong_config.loaded_plugins) + local subsystems, err = load_subsystems(db, db.loaded_plugins) if not subsystems then return nil, prefix_err(db, err) end diff --git a/kong/db/strategies/postgres/connector.lua b/kong/db/strategies/postgres/connector.lua index 4220ba01f2d1..0c5964f9230f 100644 --- a/kong/db/strategies/postgres/connector.lua +++ b/kong/db/strategies/postgres/connector.lua @@ -775,7 +775,7 @@ function _mt:schema_migrations() end -function _mt:schema_bootstrap(kong_config, default_locks_ttl) +function _mt:schema_bootstrap(default_locks_ttl) local conn = self:get_stored_connection() if not conn then error("no connection") diff --git a/kong/plugins/file-log/handler.lua b/kong/plugins/file-log/handler.lua index c7d4fe9a1954..b1e86b9311f4 100644 --- a/kong/plugins/file-log/handler.lua +++ b/kong/plugins/file-log/handler.lua @@ -24,9 +24,6 @@ local oflags = bit.bor(O_WRONLY, O_CREAT, O_APPEND) local mode = ffi.new("int", bit.bor(S_IRUSR, S_IWUSR, S_IRGRP, S_IROTH)) -local sandbox_opts = { env = { kong = kong, ngx = ngx } } - - local C 
= ffi.C @@ -73,7 +70,7 @@ function FileLogHandler:log(conf) if conf.custom_fields_by_lua then local set_serialize_value = kong.log.set_serialize_value for key, expression in pairs(conf.custom_fields_by_lua) do - set_serialize_value(key, sandbox(expression, sandbox_opts)()) + set_serialize_value(key, sandbox(expression)()) end end diff --git a/kong/plugins/http-log/handler.lua b/kong/plugins/http-log/handler.lua index fd1d0cd48eeb..0e524cfe79de 100644 --- a/kong/plugins/http-log/handler.lua +++ b/kong/plugins/http-log/handler.lua @@ -16,8 +16,6 @@ local pairs = pairs local max = math.max -local sandbox_opts = { env = { kong = kong, ngx = ngx } } - -- Create a function that concatenates multiple JSON objects into a JSON array. -- This saves us from rendering all entries into one large JSON string. -- Each invocation of the function returns the next bit of JSON, i.e. the opening @@ -183,7 +181,7 @@ function HttpLogHandler:log(conf) if conf.custom_fields_by_lua then local set_serialize_value = kong.log.set_serialize_value for key, expression in pairs(conf.custom_fields_by_lua) do - set_serialize_value(key, sandbox(expression, sandbox_opts)()) + set_serialize_value(key, sandbox(expression)()) end end diff --git a/kong/plugins/loggly/handler.lua b/kong/plugins/loggly/handler.lua index 39e70547e66f..7d408815dba7 100644 --- a/kong/plugins/loggly/handler.lua +++ b/kong/plugins/loggly/handler.lua @@ -14,9 +14,6 @@ local concat = table.concat local insert = table.insert -local sandbox_opts = { env = { kong = kong, ngx = ngx } } - - local HOSTNAME = get_host_name() local SENDER_NAME = "kong" local LOG_LEVELS = { @@ -127,7 +124,7 @@ function LogglyLogHandler:log(conf) if conf.custom_fields_by_lua then local set_serialize_value = kong.log.set_serialize_value for key, expression in pairs(conf.custom_fields_by_lua) do - set_serialize_value(key, sandbox(expression, sandbox_opts)()) + set_serialize_value(key, sandbox(expression)()) end end diff --git a/kong/plugins/pre-function/_handler.lua b/kong/plugins/pre-function/_handler.lua index ebac8fada298..3c06421953da 100644 --- a/kong/plugins/pre-function/_handler.lua +++ b/kong/plugins/pre-function/_handler.lua @@ -1,24 +1,13 @@ -local resty_mlcache = require "kong.resty.mlcache" local sandbox = require "kong.tools.sandbox" local kong_meta = require "kong.meta" + -- handler file for both the pre-function and post-function plugin local config_cache do - local no_op = function() end - local shm_name = "kong_db_cache" - local cache_name = "serverless_" .. 
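-- Illustrative aside (not part of the diff): the block removed here hand-built
-- a sandbox environment ({ kong = sandbox_kong, ngx = ngx }, with an
-- mlcache-backed kong.cache); the refactored sandbox now supplies its own
-- stricter equivalent, which is why compile_phase_array() below can simply do
--   local func = assert(sandbox.sandbox(func_string))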
shm_name - local cache = resty_mlcache.new(cache_name, shm_name, { lru_size = 1e4 }) - local sandbox_kong = setmetatable({ - cache = cache, - configuration = kong.configuration.remove_sensitive() - }, { __index = kong }) - - local sandbox_opts = { env = { kong = sandbox_kong, ngx = ngx } } - -- compiles the array for a phase into a single function local function compile_phase_array(phase_funcs) if not phase_funcs or #phase_funcs == 0 then @@ -28,7 +17,7 @@ local config_cache do -- compile the functions we got local compiled = {} for i, func_string in ipairs(phase_funcs) do - local func = assert(sandbox.sandbox(func_string, sandbox_opts)) + local func = assert(sandbox.sandbox(func_string)) local first_run_complete = false compiled[i] = function() @@ -73,11 +62,9 @@ local config_cache do end end - local phases = { "certificate", "rewrite", "access", "header_filter", "body_filter", "log" } - config_cache = setmetatable({}, { __mode = "k", __index = function(self, config) @@ -96,9 +83,7 @@ local config_cache do end - return function(priority) - local ServerlessFunction = { PRIORITY = priority, VERSION = kong_meta.version, diff --git a/kong/plugins/syslog/handler.lua b/kong/plugins/syslog/handler.lua index f9932db804d7..5d6dd076b443 100644 --- a/kong/plugins/syslog/handler.lua +++ b/kong/plugins/syslog/handler.lua @@ -56,9 +56,6 @@ local FACILITIES = { local7 = lsyslog.FACILITY_LOCAL7 } -local sandbox_opts = { env = { kong = kong, ngx = ngx } } - - local function send_to_syslog(log_level, severity, message, facility) if LOG_PRIORITIES[severity] <= LOG_PRIORITIES[log_level] then lsyslog.open(SENDER_NAME, FACILITIES[facility]) @@ -94,7 +91,7 @@ function SysLogHandler:log(conf) if conf.custom_fields_by_lua then local set_serialize_value = kong.log.set_serialize_value for key, expression in pairs(conf.custom_fields_by_lua) do - set_serialize_value(key, sandbox(expression, sandbox_opts)()) + set_serialize_value(key, sandbox(expression)()) end end diff --git a/kong/plugins/tcp-log/handler.lua b/kong/plugins/tcp-log/handler.lua index 06fddb1a0765..9723c44ec8fb 100644 --- a/kong/plugins/tcp-log/handler.lua +++ b/kong/plugins/tcp-log/handler.lua @@ -8,9 +8,6 @@ local ngx = ngx local timer_at = ngx.timer.at -local sandbox_opts = { env = { kong = kong, ngx = ngx } } - - local function log(premature, conf, message) if premature then return @@ -71,7 +68,7 @@ function TcpLogHandler:log(conf) if conf.custom_fields_by_lua then local set_serialize_value = kong.log.set_serialize_value for key, expression in pairs(conf.custom_fields_by_lua) do - set_serialize_value(key, sandbox(expression, sandbox_opts)()) + set_serialize_value(key, sandbox(expression)()) end end diff --git a/kong/plugins/udp-log/handler.lua b/kong/plugins/udp-log/handler.lua index ca586237d6cd..2592f92df46a 100644 --- a/kong/plugins/udp-log/handler.lua +++ b/kong/plugins/udp-log/handler.lua @@ -9,9 +9,6 @@ local timer_at = ngx.timer.at local udp = ngx.socket.udp -local sandbox_opts = { env = { kong = kong, ngx = ngx } } - - local function log(premature, conf, str) if premature then return @@ -52,7 +49,7 @@ function UdpLogHandler:log(conf) if conf.custom_fields_by_lua then local set_serialize_value = kong.log.set_serialize_value for key, expression in pairs(conf.custom_fields_by_lua) do - set_serialize_value(key, sandbox(expression, sandbox_opts)()) + set_serialize_value(key, sandbox(expression)()) end end diff --git a/kong/tools/kong-lua-sandbox.lua b/kong/tools/kong-lua-sandbox.lua index d91d6dadfe84..103b4717fc55 100644 --- 
a/kong/tools/kong-lua-sandbox.lua +++ b/kong/tools/kong-lua-sandbox.lua @@ -1,178 +1,3 @@ -local sandbox = { - _VERSION = "kong-lua-sandbox 1.1", - _DESCRIPTION = "A pure-lua solution for running untrusted Lua code.", - _URL = "https://github.com/kong/kong-lua-sandbox", - _LICENSE = [[ - MIT LICENSE - - Copyright (c) 2021 Kong Inc - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ]], - -} - --- quotas don't work in LuaJIT since debug.sethook works differently there -local quota_supported = type(_G.jit) == "nil" -sandbox.quota_supported = quota_supported - --- PUC-Rio Lua 5.1 does not support deactivation of bytecode -local bytecode_blocked = _ENV or type(_G.jit) == "table" -sandbox.bytecode_blocked = bytecode_blocked - --- The base environment is merged with the given env option (or an empty table, if no env provided) --- -local BASE_ENV = {} - --- List of unsafe packages/functions: --- --- * {set|get}metatable: can be used to modify the metatable of global objects (strings, integers) --- * collectgarbage: can affect performance of other systems --- * dofile: can access the server filesystem --- * _G: It has access to everything. It can be mocked to other things though. 
--- * load{file|string}: All unsafe because they can grant acces to global env --- * raw{get|set|equal}: Potentially unsafe --- * module|require|module: Can modify the host settings --- * string.dump: Can display confidential server info (implementation of functions) --- * math.randomseed: Can affect the host sytem --- * io.*, os.*: Most stuff there is unsafe, see below for exceptions - - --- Safe packages/functions below -([[ - -_VERSION assert error ipairs next pairs -pcall select tonumber tostring type unpack xpcall - -coroutine.create coroutine.resume coroutine.running coroutine.status -coroutine.wrap coroutine.yield - -math.abs math.acos math.asin math.atan math.atan2 math.ceil -math.cos math.cosh math.deg math.exp math.fmod math.floor -math.frexp math.huge math.ldexp math.log math.log10 math.max -math.min math.modf math.pi math.pow math.rad math.random -math.sin math.sinh math.sqrt math.tan math.tanh - -os.clock os.date os.difftime os.time - -string.byte string.char string.find string.format string.gmatch -string.gsub string.len string.lower string.match string.rep -string.reverse string.sub string.upper - -table.concat table.insert table.maxn table.remove table.sort - -]]):gsub('%S+', function(id) - local module, method = id:match('([^%.]+)%.([^%.]+)') - if module then - BASE_ENV[module] = BASE_ENV[module] or {} - BASE_ENV[module][method] = _G[module][method] - else - BASE_ENV[id] = _G[id] - end -end) - -local function protect_module(module, module_name) - return setmetatable({}, { - __index = module, - __newindex = function(_, attr_name, _) - error('Can not modify ' .. module_name .. '.' .. attr_name .. '. Protected by the sandbox.') - end - }) -end - -('coroutine math os string table'):gsub('%S+', function(module_name) - BASE_ENV[module_name] = protect_module(BASE_ENV[module_name], module_name) -end) - --- auxiliary functions/variables - -local function sethook(f, key, quota) - if type(debug) ~= 'table' or type(debug.sethook) ~= 'function' then return end - debug.sethook(f, key, quota) -end - -local function cleanup() - sethook() -end - --- Public interface: sandbox.protect -function sandbox.protect(code, options) - options = options or {} - - local quota = false - if options.quota and not quota_supported then - error("options.quota is not supported on this environment (usually LuaJIT). Please unset options.quota") - end - if options.quota ~= false then - quota = options.quota or 500000 - end - - assert(type(code) == 'string', "expected a string") - - local passed_env = options.env or {} - local env = {} - for k, v in pairs(BASE_ENV) do - local pv = passed_env[k] - if pv ~= nil then - env[k] = pv - else - env[k] = v - end - end - setmetatable(env, { __index = options.env }) - env._G = env - - local f - if bytecode_blocked then - f = assert(load(code, nil, 't', env)) - else - f = assert(loadstring(code)) - setfenv(f, env) - end - - return function(...) - - if quota and quota_supported then - local timeout = function() - cleanup() - error('Quota exceeded: ' .. tostring(quota)) - end - sethook(timeout, "", quota) - end - - local t = table.pack(pcall(f, ...)) - - cleanup() - - if not t[1] then error(t[2]) end - - return table.unpack(t, 2, t.n) - end -end - --- Public interface: sandbox.run -function sandbox.run(code, options, ...) - return sandbox.protect(code, options)(...) 
-end - --- make sandbox(f) == sandbox.protect(f) -setmetatable(sandbox, {__call = function(_,code,o) return sandbox.protect(code,o) end}) - -return sandbox +-- this file was moved to sandbox directory, so this is +-- just left in place for backward compatibility reasons +return require("kong.tools.sandbox.kong") diff --git a/kong/tools/sandbox.lua b/kong/tools/sandbox.lua deleted file mode 100644 index 0121ba7156d6..000000000000 --- a/kong/tools/sandbox.lua +++ /dev/null @@ -1,205 +0,0 @@ -local _sandbox = require "kong.tools.kong-lua-sandbox" - -local table = table -local fmt = string.format -local setmetatable = setmetatable -local require = require -local ipairs = ipairs -local pcall = pcall -local type = type -local error = error -local rawset = rawset -local assert = assert -local kong = kong - - --- deep copy tables using dot notation, like --- one: { foo = { bar = { hello = {}, ..., baz = 42 } } } --- target: { hey = { "hello } } --- link("foo.bar.baz", one, target) --- target -> { hey = { "hello" }, foo = { bar = { baz = 42 } } } -local function link(q, o, target) - if not q then return end - - local h, r = q:match("([^%.]+)%.?(.*)") - local mod = o[h] - - if not mod then return end - - if r == "" then - if type(mod) == 'table' then - -- changes on target[h] won't affect mod - target[h] = setmetatable({}, { __index = mod }) - - else - target[h] = mod - end - - return - end - - if not target[h] then target[h] = {} end - - link(r, o[h], target[h]) -end - - -local lazy_conf_methods = { - enabled = function(self) - return kong and - kong.configuration and - kong.configuration.untrusted_lua and - kong.configuration.untrusted_lua ~= 'off' - end, - sandbox_enabled = function(self) - return kong and - kong.configuration and - kong.configuration.untrusted_lua and - kong.configuration.untrusted_lua == 'sandbox' - end, - requires = function(self) - local conf_r = kong and - kong.configuration and - kong.configuration.untrusted_lua_sandbox_requires or {} - local requires = {} - for _, r in ipairs(conf_r) do requires[r] = true end - return requires - end, - env_vars = function(self) - return kong and - kong.configuration and - kong.configuration.untrusted_lua_sandbox_environment or {} - end, - environment = function(self) - local env = { - -- home brewed require function that only requires what we consider - -- safe :) - ["require"] = function(m) - if not self.requires[m] then - error(fmt("require '%s' not allowed within sandbox", m)) - end - - return require(m) - end, - } - - for _, m in ipairs(self.env_vars) do link(m, _G, env) end - - return env - end, -} - - -local conf_values = { - clear = table.clear, - reload = table.clear, - err_msg = "loading of untrusted Lua code disabled because " .. 
- "'untrusted_lua' config option is set to 'off'" -} - - -local configuration = setmetatable({}, { - __index = function(self, key) - local l = lazy_conf_methods[key] - - if not l then - return conf_values[key] - end - - local value = l(self) - rawset(self, key, value) - - return value - end, -}) - - -local sandbox = function(fn, opts) - if not configuration.enabled then - error(configuration.err_msg) - end - - opts = opts or {} - - local opts = { - -- default set load string mode to only 'text chunks' - mode = opts.mode or 't', - env = opts.env or {}, - chunk_name = opts.chunk_name, - } - - if not configuration.sandbox_enabled then - -- sandbox disabled, all arbitrary Lua code can execute unrestricted - setmetatable(opts.env, { __index = _G}) - - return assert(load(fn, opts.chunk_name, opts.mode, opts.env)) - end - - -- set (discard-able) function context - setmetatable(opts.env, { __index = configuration.environment }) - - return _sandbox(fn, opts) -end - - -local function validate_function(fun, opts) - local ok, func1 = pcall(sandbox, fun, opts) - if not ok then - return false, "Error parsing function: " .. func1 - end - - local success, func2 = pcall(func1) - - if not success then - return false, func2 - end - - if type(func2) == "function" then - return func2 - end - - -- the code returned something unknown - return false, "Bad return value from function, expected function type, got " - .. type(func2) -end - - -local function parse(fn_str, opts) - return assert(validate_function(fn_str, opts)) -end - - -local _M = {} - - -_M.validate_function = validate_function - - -_M.validate = function(fn_str, opts) - local _, err = validate_function(fn_str, opts) - if err then return false, err end - - return true -end - - --- meant for schema, do not execute arbitrary lua! --- https://github.com/Kong/kong/issues/5110 -_M.validate_safe = function(fn_str, opts) - local ok, func1 = pcall(sandbox, fn_str, opts) - - if not ok then - return false, "Error parsing function: " .. func1 - end - - return true -end - - -_M.sandbox = sandbox -_M.parse = parse --- useful for testing -_M.configuration = configuration - - -return _M diff --git a/kong/tools/sandbox/environment/handler.lua b/kong/tools/sandbox/environment/handler.lua new file mode 100644 index 000000000000..ca36b135e9a1 --- /dev/null +++ b/kong/tools/sandbox/environment/handler.lua @@ -0,0 +1,199 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. +-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +return require("kong.tools.sandbox.environment.lua") .. 
[[ +kong.cache.get kong.cache.get_bulk kong.cache.probe +kong.cache.invalidate kong.cache.invalidate_local kong.cache.safe_set +kong.cache.renew + +kong.client.authenticate kong.client.authenticate_consumer_group_by_consumer_id +kong.client.get_consumer kong.client.get_consumer_group +kong.client.get_consumer_groups kong.client.get_credential +kong.client.get_forwarded_ip kong.client.get_forwarded_port +kong.client.get_ip kong.client.get_port +kong.client.get_protocol kong.client.load_consumer +kong.client.set_authenticated_consumer_group kong.client.set_authenticated_consumer_groups + +kong.client.tls.disable_session_reuse kong.client.tls.get_full_client_certificate_chain +kong.client.tls.request_client_certificate kong.client.tls.set_client_verify + +kong.cluster.get_id + +kong.db.certificates.cache_key kong.db.certificates.select +kong.db.certificates.select_by_cache_key +kong.db.consumers.cache_key kong.db.consumers.select +kong.db.consumers.select_by_cache_key kong.db.consumers.select_by_custom_id +kong.db.consumers.select_by_username kong.db.consumers.select_by_username_ignore_case +kong.db.keys.cache_key kong.db.keys.select +kong.db.keys.select_by_cache_key kong.db.keys.select_by_name +kong.db.plugins.cache_key kong.db.plugins.select +kong.db.plugins.select_by_cache_key kong.db.plugins.select_by_instance_name +kong.db.routes.cache_key kong.db.routes.select +kong.db.routes.select_by_cache_key kong.db.routes.select_by_name +kong.db.services.cache_key kong.db.services.select +kong.db.services.select_by_cache_key kong.db.services.select_by_name +kong.db.snis.cache_key kong.db.snis.select +kong.db.snis.select_by_cache_key kong.db.snis.select_by_name +kong.db.targets.cache_key kong.db.targets.select +kong.db.targets.select_by_cache_key kong.db.targets.select_by_target +kong.db.upstreams.cache_key kong.db.upstreams.select +kong.db.upstreams.select_by_cache_key kong.db.upstreams.select_by_name + +kong.default_workspace + +kong.dns.resolve kong.dns.toip + +kong.ip.is_trusted + +kong.jwe.decode kong.jwe.decrypt kong.jwe.encrypt + +kong.log.alert kong.log.crit kong.log.debug +kong.log.emerg kong.log.err kong.log.info +kong.log.notice kong.log.serialize kong.log.set_serialize_value +kong.log.warn + +kong.log.deprecation.write + +kong.log.inspect.on kong.log.inspect.off + +kong.nginx.get_statistics kong.nginx.get_subsystem + +kong.node.get_hostname kong.node.get_id kong.node.get_memory_stats + +kong.plugin.get_id + +kong.request.get_body kong.request.get_forwarded_host +kong.request.get_forwarded_path kong.request.get_forwarded_port +kong.request.get_forwarded_prefix kong.request.get_forwarded_scheme +kong.request.get_header kong.request.get_headers +kong.request.get_host kong.request.get_http_version +kong.request.get_method kong.request.get_path +kong.request.get_path_with_query kong.request.get_port +kong.request.get_query kong.request.get_query_arg +kong.request.get_raw_body kong.request.get_raw_path +kong.request.get_raw_query kong.request.get_scheme +kong.request.get_start_time kong.request.get_uri_captures + +kong.response.add_header kong.response.clear_header +kong.response.error kong.response.exit +kong.response.get_header kong.response.get_headers +kong.response.get_raw_body kong.response.get_source +kong.response.get_status kong.response.set_header +kong.response.set_headers kong.response.set_raw_body +kong.response.set_status + +kong.router.get_route kong.router.get_service + +kong.service.set_retries kong.service.set_target +kong.service.set_target_retry_callback 
kong.service.set_timeouts +kong.service.set_tls_cert_key kong.service.set_tls_cert_key +kong.service.set_tls_verify kong.service.set_tls_verify_depth +kong.service.set_tls_verify_store kong.service.set_tls_verify_store +kong.service.set_upstream + +kong.service.request.add_header kong.service.request.clear_header +kong.service.request.clear_query_arg kong.service.request.enable_buffering +kong.service.request.set_body kong.service.request.set_header +kong.service.request.set_headers kong.service.request.set_method +kong.service.request.set_path kong.service.request.set_query +kong.service.request.set_raw_body kong.service.request.set_raw_query +kong.service.request.set_scheme + +kong.service.response.get_body kong.service.response.get_header +kong.service.response.get_headers kong.service.response.get_raw_body +kong.service.response.get_status + +kong.table.clear kong.table.merge + +kong.telemetry.log + +kong.tracing.create_span kong.tracing.get_sampling_decision +kong.tracing.link_span kong.tracing.process_span +kong.tracing.set_active_span kong.tracing.set_should_sample +kong.tracing.start_span + +kong.vault.get kong.vault.is_reference kong.vault.parse_reference +kong.vault.try kong.vault.update + +kong.version kong.version_num + +kong.websocket.client.close kong.websocket.client.drop_frame +kong.websocket.client.get_frame kong.websocket.client.set_frame_data +kong.websocket.client.set_max_payload_size kong.websocket.client.set_status + +kong.websocket.upstream.close kong.websocket.upstream.drop_frame +kong.websocket.upstream.get_frame kong.websocket.upstream.set_frame_data +kong.websocket.upstream.set_max_payload_size kong.websocket.upstream.set_status + +ngx.AGAIN ngx.ALERT +ngx.CRIT ngx.DEBUG +ngx.DECLINED ngx.DONE +ngx.EMERG ngx.ERR +ngx.ERROR ngx.HTTP_ACCEPTED +ngx.HTTP_BAD_GATEWAY ngx.HTTP_BAD_REQUEST +ngx.HTTP_CLOSE ngx.HTTP_CONFLICT +ngx.HTTP_CONTINUE ngx.HTTP_COPY +ngx.HTTP_CREATED ngx.HTTP_DELETE +ngx.HTTP_FORBIDDEN ngx.HTTP_GATEWAY_TIMEOUT +ngx.HTTP_GET ngx.HTTP_GONE +ngx.HTTP_HEAD ngx.HTTP_ILLEGAL +ngx.HTTP_INSUFFICIENT_STORAGE ngx.HTTP_INTERNAL_SERVER_ERROR +ngx.HTTP_LOCK ngx.HTTP_METHOD_NOT_IMPLEMENTED +ngx.HTTP_MKCOL ngx.HTTP_MOVE +ngx.HTTP_MOVED_PERMANENTLY ngx.HTTP_MOVED_TEMPORARILY +ngx.HTTP_NOT_ACCEPTABLE ngx.HTTP_NOT_ALLOWED +ngx.HTTP_NOT_FOUND ngx.HTTP_NOT_IMPLEMENTED +ngx.HTTP_NOT_MODIFIED ngx.HTTP_NO_CONTENT +ngx.HTTP_OK ngx.HTTP_OPTIONS +ngx.HTTP_PARTIAL_CONTENT ngx.HTTP_PATCH +ngx.HTTP_PAYMENT_REQUIRED ngx.HTTP_PERMANENT_REDIRECT +ngx.HTTP_POST ngx.HTTP_PROPFIND +ngx.HTTP_PROPPATCH ngx.HTTP_PUT +ngx.HTTP_REQUEST_TIMEOUT ngx.HTTP_SEE_OTHER +ngx.HTTP_SERVICE_UNAVAILABLE ngx.HTTP_SPECIAL_RESPONSE +ngx.HTTP_SWITCHING_PROTOCOLS ngx.HTTP_TEMPORARY_REDIRECT +ngx.HTTP_TOO_MANY_REQUESTS ngx.HTTP_TRACE +ngx.HTTP_UNAUTHORIZED ngx.HTTP_UNLOCK +ngx.HTTP_UPGRADE_REQUIRED ngx.HTTP_VERSION_NOT_SUPPORTED +ngx.INFO ngx.NOTICE +ngx.OK ngx.STDERR +ngx.WARN + +ngx.cookie_time ngx.crc32_long ngx.crc32_short ngx.decode_args +ngx.decode_base64 ngx.encode_args ngx.encode_base64 ngx.eof +ngx.escape_uri ngx.exit ngx.flush ngx.get_phase +ngx.get_raw_phase ngx.hmac_sha1 ngx.http_time ngx.localtime +ngx.log ngx.md5 ngx.md5_bin ngx.now +ngx.null ngx.parse_http_time ngx.print ngx.quote_sql_str +ngx.redirect ngx.say ngx.send_headers ngx.sha1_bin +ngx.sleep ngx.time ngx.today ngx.unescape_uri +ngx.update_time ngx.utctime + +ngx.config.debug ngx.config.nginx_version ngx.config.ngx_lua_version +ngx.config.subsystem + +ngx.location.capture ngx.location.capture_multi + +ngx.re.find ngx.re.gmatch 
ngx.re.gsub ngx.re.match ngx.re.sub + +ngx.req.append_body ngx.req.clear_header ngx.req.discard_body +ngx.req.finish_body ngx.req.get_body_data ngx.req.get_body_file +ngx.req.get_headers ngx.req.get_method ngx.req.get_post_args +ngx.req.get_uri_args ngx.req.http_version ngx.req.init_body +ngx.req.is_internal ngx.req.raw_header ngx.req.read_body +ngx.req.set_body_data ngx.req.set_body_file ngx.req.set_header +ngx.req.set_method ngx.req.set_uri ngx.req.set_uri_args +ngx.req.socket ngx.req.start_time ngx.resp.get_headers + +ngx.thread.kill ngx.thread.spawn ngx.thread.wait + +ngx.socket.connect ngx.socket.stream ngx.socket.tcp ngx.socket.udp + +ngx.worker.count ngx.worker.exiting ngx.worker.id ngx.worker.pid +ngx.worker.pids +]] diff --git a/kong/tools/sandbox/environment/init.lua b/kong/tools/sandbox/environment/init.lua new file mode 100644 index 000000000000..5d2966b2d812 --- /dev/null +++ b/kong/tools/sandbox/environment/init.lua @@ -0,0 +1,185 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. +-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +local ENVIRONMENT do + ENVIRONMENT = {} + + local setmetatable = setmetatable + local getmetatable = getmetatable + local require = require + local package = package + local rawset = rawset + local ipairs = ipairs + local pairs = pairs + local error = error + local type = type + local _G = _G + + local function wrap_method(self, method) + return function(_, ...) + return self[method](self, ...) + end + end + + local function include(env, id) + -- The code here checks a lot of types and stuff, just to please our test suite + -- to not error out when used with mocks. + local m, sm, lf, f = id:match("([^%.]+)%.([^%.]+)%.([^%.]+)%.([^%.]+)") + if m then + env[m] = env[m] or {} + env[m][sm] = env[m][sm] or {} + env[m][sm][lf] = env[m][sm][lf] or {} + + if m == "kong" and sm == "db" then + env[m][sm][lf][f] = type(_G[m]) == "table" + and type(_G[m][sm]) == "table" + and type(_G[m][sm][lf]) == "table" + and type(_G[m][sm][lf][f]) == "function" + and wrap_method(_G[m][sm][lf], f) + else + env[m][sm][lf][f] = type(_G[m]) == "table" + and type(_G[m][sm]) == "table" + and type(_G[m][sm][lf]) == "table" + and _G[m][sm][lf][f] + end + + else + m, sm, f = id:match("([^%.]+)%.([^%.]+)%.([^%.]+)") + if m then + env[m] = env[m] or {} + env[m][sm] = env[m][sm] or {} + + if m == "kong" and sm == "cache" then + env[m][sm][f] = type(_G[m]) == "table" + and type(_G[m][sm]) == "table" + and type(_G[m][sm][f]) == "function" + and wrap_method(_G[m][sm], f) + + else + env[m][sm][f] = type(_G[m]) == "table" + and type(_G[m][sm]) == "table" + and _G[m][sm][f] + end + + else + m, f = id:match("([^%.]+)%.([^%.]+)") + if m then + env[m] = env[m] or {} + env[m][f] = type(_G[m]) == "table" and _G[m][f] + + else + env[id] = _G[id] + end + end + end + end + + + local function protect_module(modname, mod) + return setmetatable(mod, { + __newindex = function(_, k, _) + return error(("Cannot modify %s.%s. Protected by the sandbox."): format(modname, k), -1) + end + }) + end + + local function protect_modules(mod, modname) + for k, v in pairs(mod) do + if type(v) == "table" then + protect_modules(v, modname and (modname .. "." .. 
k) or k) + end + end + + if modname and modname ~= "ngx" then + protect_module(modname, mod) + end + end + + local function protect(env) + protect_modules(env, "_G") + rawset(env, "_G", env) + + local kong = kong + local ngx = ngx + + if type(ngx) == "table" and type(env.ngx) == "table" then + -- this is needed for special ngx.{ctx|headers_sent|is_subrequest|status) + setmetatable(env.ngx, getmetatable(ngx)) + + -- libraries having metatable logic + rawset(env.ngx, "var", ngx.var) + rawset(env.ngx, "arg", ngx.arg) + rawset(env.ngx, "header", ngx.header) + end + + if type(kong) == "table" and type(env.kong) == "table" then + -- __call meta-method for kong log + if type(kong.log) == "table" and type(env.kong.log) == "table" then + getmetatable(env.kong.log).__call = (getmetatable(kong.log) or {}).__call + + if type(kong.log.inspect) == "table" and type(env.kong.log.inspect) == "table" then + getmetatable(env.kong.log.inspect).__call = (getmetatable(kong.log.inspect) or {}).__call + end + if type(kong.log.deprecation) == "table" and type(env.kong.log.deprecation) == "table" then + getmetatable(env.kong.log.deprecation).__call = (getmetatable(kong.log.deprecation) or {}).__call + end + end + + if type(kong.configuration) == "table" and type(kong.configuration.remove_sensitive) == "function" then + -- only expose the non-sensitive parts of kong.configuration + rawset(env.kong, "configuration", + protect_module("kong.configuration", kong.configuration.remove_sensitive())) + end + + if type(kong.ctx) == "table" then + -- only support kong.ctx.shared and kong.ctx.plugin + local ctx = kong.ctx + rawset(env.kong, "ctx", protect_module("kong.ctx", { + shared = setmetatable({}, { + __newindex = function(_, k, v) + ctx.shared[k] = v + end, + __index = function(_, k) + return ctx.shared[k] + end, + }), + plugin = setmetatable({}, { + __newindex = function(_, k, v) + ctx.plugin[k] = v + end, + __index = function(_, k) + return ctx.plugin[k] + end, + }) + })) + end + end + + return env + end + + local sandbox_require = require("kong.tools.sandbox.require") + + -- the order is from the biggest to the smallest so that package + -- unloading works properly (just to not leave garbage around) + for _, t in ipairs({ "handler", "schema", "lua" }) do + local env = {} + local package_name = "kong.tools.sandbox.environment." .. t + require(package_name):gsub("%S+", function(id) + include(env, id) + end) + package.loaded[package_name] = nil + rawset(env, "require", sandbox_require[t]) + ENVIRONMENT[t] = protect(env) + end + + package.loaded["kong.tools.sandbox.require"] = nil +end + + +return ENVIRONMENT diff --git a/kong/tools/sandbox/environment/lua.lua b/kong/tools/sandbox/environment/lua.lua new file mode 100644 index 000000000000..29ce8c0ad67a --- /dev/null +++ b/kong/tools/sandbox/environment/lua.lua @@ -0,0 +1,39 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. 
+-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +return [[ +_VERSION assert error ipairs next pairs pcall print select +tonumber tostring type unpack xpcall + +bit.arshift bit.band bit.bnot bit.bor bit.bswap bit.bxor +bit.lshift bit.rol bit.ror bit.rshift bit.tobit bit.tohex + +coroutine.create coroutine.resume coroutine.running +coroutine.status coroutine.wrap coroutine.yield + +io.type + +jit.os jit.arch jit.version jit.version_num + +math.abs math.acos math.asin math.atan math.atan2 math.ceil +math.cos math.cosh math.deg math.exp math.floor math.fmod +math.frexp math.huge math.ldexp math.log math.log10 math.max +math.min math.modf math.pi math.pow math.rad math.random +math.sin math.sinh math.sqrt math.tan math.tanh + +os.clock os.date os.difftime os.time + +string.byte string.char string.find string.format string.gmatch +string.gsub string.len string.lower string.match string.rep +string.reverse string.sub string.upper + +table.clear table.clone table.concat table.foreach table.foreachi +table.getn table.insert table.isarray table.isempty table.maxn +table.move table.new table.nkeys table.pack table.remove +table.sort table.unpack +]] diff --git a/kong/tools/sandbox/environment/schema.lua b/kong/tools/sandbox/environment/schema.lua new file mode 100644 index 000000000000..fc17e742b472 --- /dev/null +++ b/kong/tools/sandbox/environment/schema.lua @@ -0,0 +1,10 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. +-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +-- for now the schema is just using the most restricted environment +return require("kong.tools.sandbox.environment.lua") diff --git a/kong/tools/sandbox/init.lua b/kong/tools/sandbox/init.lua new file mode 100644 index 000000000000..f2ef433a4a76 --- /dev/null +++ b/kong/tools/sandbox/init.lua @@ -0,0 +1,334 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. 
+-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ]
+
+
+local sb_kong = require("kong.tools.sandbox.kong")
+
+
+local table = table
+local setmetatable = setmetatable
+local require = require
+local ipairs = ipairs
+local pcall = pcall
+local type = type
+local load = load
+local error = error
+local rawset = rawset
+local assert = assert
+
+
+-- deep copy tables using dot notation, like
+-- one: { foo = { bar = { hello = {}, ..., baz = 42 } } }
+-- target: { hey = { "hello" } }
+-- link("foo.bar.baz", one, target)
+-- target -> { hey = { "hello" }, foo = { bar = { baz = 42 } } }
+local function link(q, o, target)
+  if not q then
+    return
+  end
+
+  local h, r = q:match("([^%.]+)%.?(.*)")
+
+  local mod = o[h]
+  if not mod then
+    return
+  end
+
+  if r == "" then
+    if type(mod) == "table" then
+      -- changes on target[h] won't affect mod
+      target[h] = setmetatable({}, { __index = mod })
+
+    else
+      target[h] = mod
+    end
+
+    return
+  end
+
+  if not target[h] then
+    target[h] = {}
+  end
+
+  link(r, o[h], target[h])
+end
+
+
+local function get_conf(name)
+  return kong
+     and kong.configuration
+     and kong.configuration[name]
+end
+
+
+local function link_vars(vars, env)
+  if vars then
+    for _, m in ipairs(vars) do
+      link(m, _G, env)
+    end
+  end
+
+  env._G = env
+
+  return env
+end
+
+
+local function denied_table(modname)
+  return setmetatable({}, { __index = {}, __newindex = function(_, k)
+    return error(("Cannot modify %s.%s. Protected by the sandbox."):format(modname, k), -1)
+  end, __tostring = function()
+    return "nil"
+  end })
+end
+
+
+local function denied_require(modname)
+  return error(("require '%s' not allowed within sandbox"):format(modname), -1)
+end
+
+
+local function get_backward_compatible_sandboxed_kong()
+  -- this is more like a blacklist where we try to keep backwards
+  -- compatibility, while still improving the default sandboxing so that
+  -- it does not leak secrets like pg_password.
+  --
+  -- just to note, kong.db.<dao>:truncate() and kong.db.connector:query(...)
+  -- are quite powerful, but they are not disallowed for backwards compatibility.
+  --
+  -- of course, for this to work, the `getmetatable` and `require "inspect"`
+  -- and such need to be disabled as well.
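-- Illustrative behaviour sketch (not part of the diff, assumes a live kong
-- global):
--
--   local k = get_backward_compatible_sandboxed_kong()
--   local host = k.request.get_host()             -- real PDK via __index fallback
--   local pw = k.db.connector.config.pg_password  --> nil, connector config is masked
--   k.licensing.foo = true  --> error("Cannot modify kong.licensing.foo. ...")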
+ + local k + if type(kong) == "table" then + k = setmetatable({ + licensing = denied_table("kong.licensing"), + }, { __index = kong }) + + if type(kong.cache) == "table" then + k.cache = setmetatable({ + cluster_events = denied_table("kong.cache.cluster_events") + }, { __index = kong.cache }) + end + + if type(kong.core_cache) == "table" then + k.core_cache = setmetatable({ + cluster_events = denied_table("kong.cache.cluster_events") + }, { __index = kong.core_cache }) + end + + if type(kong.configuration) == "table" and type(kong.configuration.remove_sensitive) == "function" then + k.configuration = kong.configuration.remove_sensitive() + end + + if type(kong.db) == "table" then + k.db = setmetatable({}, { __index = kong.db }) + if type(kong.db.connector) == "table" then + k.db.connector = setmetatable({ + config = denied_table("kong.db.connector.config") + }, { __index = kong.db.connector }) + end + end + end + return k +end + + +local lazy_conf_methods = { + enabled = function() + return get_conf("untrusted_lua") ~= "off" + end, + sandbox_enabled = function() + return get_conf("untrusted_lua") == "sandbox" + end, + requires = function() + local sandbox_requires = get_conf("untrusted_lua_sandbox_requires") + if type(sandbox_requires) ~= "table" or #sandbox_requires == 0 then + return + end + local requires = {} + for _, r in ipairs(sandbox_requires) do + requires[r] = true + end + return requires + end, + env_vars = function() + local env_vars = get_conf("untrusted_lua_sandbox_environment") + if type(env_vars) ~= "table" or #env_vars == 0 then + return + end + return env_vars + end, + environment = function(self) + local requires = self.requires + return link_vars(self.env_vars, requires and { + -- home brewed require function that only requires what we consider safe :) + require = function(modname) + if not requires[modname] then + return denied_require(modname) + end + return require(modname) + end, + -- allow almost full non-sandboxed access to everything in kong global + kong = get_backward_compatible_sandboxed_kong(), + -- allow full non-sandboxed access to everything in ngx global (including timers, :-() + ngx = ngx, + } or { + require = denied_require, + -- allow almost full non-sandboxed access to everything in kong global + kong = get_backward_compatible_sandboxed_kong(), + -- allow full non-sandboxed access to everything in ngx global (including timers, :-() + ngx = ngx, + }) + end, + sandbox_mt = function(self) + return { __index = self.environment } + end, + global_mt = function() + return { __index = _G } + end, +} + + +local conf_values = { + clear = table.clear, + reload = table.clear, + err_msg = "loading of untrusted Lua code disabled because " .. 
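-- Illustrative aside (not part of the diff): `configuration` below memoizes
-- each lazy_conf_methods result on first access (__index computes, rawset
-- caches); tests can call configuration:reload(), i.e. table.clear(), to
-- force recomputation after kong.configuration changes.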
+ "'untrusted_lua' config option is set to 'off'" +} + + +local configuration = setmetatable({}, { + __index = function(self, key) + local l = lazy_conf_methods[key] + if not l then + return conf_values[key] + end + + local value = l(self) + rawset(self, key, value) + + return value + end, +}) + + +local function sandbox_backward_compatible(chunk, chunkname_or_options, mode, env) + if not configuration.enabled then + return error(configuration.err_msg, -1) + end + + local chunkname + if type(chunkname_or_options) == "table" then + chunkname = chunkname_or_options.chunkname or chunkname_or_options.chunk_name + mode = mode or chunkname_or_options.mode or "t" + env = env or chunkname_or_options.env or {} + else + chunkname = chunkname_or_options + mode = mode or "t" + env = env or {} + end + + if not configuration.sandbox_enabled then + -- sandbox disabled, all arbitrary Lua code can execute unrestricted, + -- but do not allow direct modification of the global environment + return assert(load(chunk, chunkname, mode, setmetatable(env, configuration.global_mt))) + end + + return sb_kong(chunk, chunkname, mode, setmetatable(env, configuration.sandbox_mt)) +end + + +local function sandbox(chunk, chunkname, func) + if not configuration.enabled then + return error(configuration.err_msg, -1) + end + + if not configuration.sandbox_enabled then + -- sandbox disabled, all arbitrary Lua code can execute unrestricted, + -- but do not allow direct modification of the global environment + return assert(load(chunk, chunkname, "t", setmetatable({}, configuration.global_mt))) + end + + return func(chunk, chunkname) +end + + +local function sandbox_lua(chunk, chunkname) + return sandbox(chunk, chunkname, sb_kong.protect_lua) +end + + +local function sandbox_schema(chunk, chunkname) + return sandbox(chunk, chunkname, sb_kong.protect_schema) +end + + +local function sandbox_handler(chunk, chunkname) + return sandbox(chunk, chunkname, sb_kong.protect_handler) +end + + +local function validate_function(chunk, chunkname_or_options, mode, env) + local ok, compiled_chunk = pcall(sandbox_backward_compatible, chunk, chunkname_or_options, mode, env) + if not ok then + return false, "Error parsing function: " .. compiled_chunk + end + + local success, fn = pcall(compiled_chunk) + if not success then + return false, fn + end + + if type(fn) == "function" then + return fn + end + + -- the code returned something unknown + return false, "Bad return value from function, expected function type, got " .. type(fn) +end + + +local function parse(chunk, chunkname_or_options, mode, env) + return assert(validate_function(chunk, chunkname_or_options, mode, env)) +end + + +local function validate(chunk, chunkname_or_options, mode, env) + local _, err = validate_function(chunk, chunkname_or_options, mode, env) + if err then + return false, err + end + + return true +end + + +-- meant for schema, do not execute arbitrary lua! +-- https://github.com/Kong/kong/issues/5110 +local function validate_safe(chunk, chunkname_or_options, mode, env) + local ok, fn = pcall(sandbox_backward_compatible, chunk, chunkname_or_options, mode, env) + if not ok then + return false, "Error parsing function: " .. 
fn + end + + return true +end + + +return { + validate = validate, + validate_safe = validate_safe, + validate_function = validate_function, + sandbox = sandbox_backward_compatible, + sandbox_lua = sandbox_lua, + sandbox_schema = sandbox_schema, + sandbox_handler = sandbox_handler, + parse = parse, + --useful for testing + configuration = configuration, +} diff --git a/kong/tools/sandbox/kong.lua b/kong/tools/sandbox/kong.lua new file mode 100644 index 000000000000..41ecf20acc88 --- /dev/null +++ b/kong/tools/sandbox/kong.lua @@ -0,0 +1,125 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. +-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +local clone = require "table.clone" + + +local setmetatable = setmetatable +local rawset = rawset +local unpack = table.unpack +local assert = assert +local error = error +local pairs = pairs +local pcall = pcall +local type = type +local load = load +local pack = table.pack + + +local function get_lua_env() + return require("kong.tools.sandbox.environment").lua +end + + +local function get_schema_env() + return require("kong.tools.sandbox.environment").schema +end + + +local function get_handler_env() + return require("kong.tools.sandbox.environment").handler +end + + +local function create_lua_env(env) + local new_env = clone(get_lua_env()) + if env then + for k, v in pairs(new_env) do + rawset(new_env, k, env[k] ~= nil and env[k] or v) + end + if env.require ~= nil then + rawset(new_env, "require", env.require) + end + setmetatable(new_env, { __index = env }) + end + return new_env +end + + +local function wrap(compiled) + return function(...) + local t = pack(pcall(compiled, ...)) + if not t[1] then + return error(t[2], -1) + end + return unpack(t, 2, t.n) + end +end + + +local function protect_backward_compatible(chunk, chunkname, mode, env) + assert(type(chunk) == "string", "expected a string") + local compiled, err = load(chunk, chunkname, mode or "t", create_lua_env(env)) + if not compiled then + return error(err, -1) + end + local fn = wrap(compiled) + return fn +end + + +local sandbox = {} + + +function sandbox.protect(chunk, chunkname_or_options, mode, env) + if type(chunkname_or_options) == "table" then + return protect_backward_compatible(chunk, nil, nil, chunkname_or_options and chunkname_or_options.env) + end + return protect_backward_compatible(chunk, chunkname_or_options, mode, env) +end + + +function sandbox.run(chunk, options, ...) + return sandbox.protect(chunk, options)(...) 
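-- Illustrative usage (not part of the diff): run() compiles the chunk against
-- the restricted Lua environment and invokes it immediately; errors raised
-- inside the chunk are re-thrown by the pcall wrapper in wrap():
--
--   sandbox.run([[local a, b = ... return a + b]], nil, 1, 2)  --> 3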
+end + + +local function protect(chunk, chunkname, env) + assert(type(chunk) == "string", "expected a string") + local compiled, err = load(chunk, chunkname, "t", env) + if not compiled then + return error(err, -1) + end + return compiled +end + + +function sandbox.protect_lua(chunk, chunkname) + return protect(chunk, chunkname, get_lua_env()) +end + + +function sandbox.protect_schema(chunk, chunkname) + return protect(chunk, chunkname, get_schema_env()) +end + + +function sandbox.protect_handler(chunk, chunkname) + return protect(chunk, chunkname, get_handler_env()) +end + + +-- make sandbox(f) == sandbox.protect(f) +setmetatable(sandbox, { + __call = function(_, chunk, chunkname_or_options, mode, env) + return sandbox.protect(chunk, chunkname_or_options, mode, env) + end +}) + + +return sandbox diff --git a/kong/tools/sandbox/require/handler.lua b/kong/tools/sandbox/require/handler.lua new file mode 100644 index 000000000000..a25e47468214 --- /dev/null +++ b/kong/tools/sandbox/require/handler.lua @@ -0,0 +1,62 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. +-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +return require("kong.tools.sandbox.require.lua") .. [[ +kong.enterprise_edition.tools.redis.v2 + +cjson cjson.safe + +lyaml + +kong.constants kong.concurrency kong.meta + +kong.tools.cjson kong.tools.gzip kong.tools.ip kong.tools.mime_type +kong.tools.rand kong.tools.sha256 kong.tools.string kong.tools.table +kong.tools.time kong.tools.timestamp kong.tools.uri kong.tools.uuid +kong.tools.yield + +ngx.base64 ngx.req ngx.resp ngx.semaphore + +pgmoon pgmoon.arrays pgmoon.hstore + +pl.stringx pl.tablex + +resty.aes resty.lock resty.md5 resty.memcached resty.mysql resty.random +resty.redis resty.sha resty.sha1 resty.sha224 resty.sha256 resty.sha384 +resty.sha512 resty.string resty.upload + +resty.core.time resty.dns.resolver resty.lrucache resty.lrucache.pureffi + +resty.ada resty.ada.search +resty.aws +resty.azure +resty.cookie +resty.evp +resty.gcp +resty.http +resty.ipmatcher +resty.jit-uuid +resty.jq +resty.jwt +resty.passwdqc +resty.session +resty.rediscluster + +resty.openssl resty.openssl.bn resty.openssl.cipher resty.openssl.digest +resty.openssl.hmac resty.openssl.kdf resty.openssl.mac resty.openssl.pkey +resty.openssl.pkcs12 resty.openssl.objects resty.openssl.rand resty.openssl.version +resty.openssl.x509 + +socket.url + +tablepool + +version + +xmlua +]] diff --git a/kong/tools/sandbox/require/init.lua b/kong/tools/sandbox/require/init.lua new file mode 100644 index 000000000000..36b1c4b8fab5 --- /dev/null +++ b/kong/tools/sandbox/require/init.lua @@ -0,0 +1,48 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. 
+-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +local REQUIRES do + local require = require + local package = package + local ipairs = ipairs + local error = error + + local function denied_require(modname) + return error(("require '%s' not allowed within sandbox"):format(modname)) + end + + REQUIRES = setmetatable({}, { + __index = function() + return denied_require + end + }) + + local function generate_require(packages) + return function(modname) + if not packages[modname] then + return denied_require(modname) + end + return require(modname) + end + end + + -- the order is from the biggest to the smallest so that package + -- unloading works properly (just to not leave garbage around) + for _, t in ipairs({ "handler", "schema", "lua" }) do + local packages = {} + local package_name = "kong.tools.sandbox.require." .. t + require(package_name):gsub("%S+", function(modname) + packages[modname] = true + end) + package.loaded[package_name] = nil + REQUIRES[t] = generate_require(packages) + end +end + + +return REQUIRES diff --git a/kong/tools/sandbox/require/lua.lua b/kong/tools/sandbox/require/lua.lua new file mode 100644 index 000000000000..218f54b4a050 --- /dev/null +++ b/kong/tools/sandbox/require/lua.lua @@ -0,0 +1,15 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. +-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +return [[ +bit + +string.buffer + +table.clear table.clone table.isarray table.isempty table.new table.nkeys +]] diff --git a/kong/tools/sandbox/require/schema.lua b/kong/tools/sandbox/require/schema.lua new file mode 100644 index 000000000000..852bbb3caef2 --- /dev/null +++ b/kong/tools/sandbox/require/schema.lua @@ -0,0 +1,13 @@ +-- This software is copyright Kong Inc. and its licensors. +-- Use of the software is subject to the agreement between your organization +-- and Kong Inc. If there is no such agreement, use is governed by and +-- subject to the terms of the Kong Master Software License Agreement found +-- at https://konghq.com/enterprisesoftwarelicense/. +-- [ END OF LICENSE 0867164ffc95e54f04670b5169c09574bdbd9bba ] + + +return require("kong.tools.sandbox.require.lua") .. 
[[ +kong.db.schema.typedefs kong.tools.redis.schema + +kong.enterprise_edition.tools.redis.v2 kong.enterprise_edition.tools.redis.v2.schema +]] diff --git a/spec/01-unit/20-sandbox_spec.lua b/spec/01-unit/20-sandbox_spec.lua index 4e5fa5deab96..96bed14fd070 100644 --- a/spec/01-unit/20-sandbox_spec.lua +++ b/spec/01-unit/20-sandbox_spec.lua @@ -20,8 +20,8 @@ describe("sandbox functions wrapper", function() -- load and reference module we can spy on load_s = spy.new(load) _G.load = load_s - _sandbox = spy.new(require "kong.tools.kong-lua-sandbox") - package.loaded["kong.tools.kong-lua-sandbox"] = _sandbox + _sandbox = spy.new(require "kong.tools.sandbox.kong") + package.loaded["kong.tools.sandbox.kong"] = _sandbox sandbox = require "kong.tools.sandbox" end) diff --git a/spec/03-plugins/18-acl/02-access_spec.lua b/spec/03-plugins/18-acl/02-access_spec.lua index 8b69f0f24346..e8f62fd58b9d 100644 --- a/spec/03-plugins/18-acl/02-access_spec.lua +++ b/spec/03-plugins/18-acl/02-access_spec.lua @@ -729,19 +729,15 @@ for _, strategy in helpers.each_strategy() do hosts = { "acl14.test" } }) - local acl_prefunction_code = " local consumer_id = \"" .. tostring(consumer2.id) .. "\"\n" .. [[ - local cache_key = kong.db.acls:cache_key(consumer_id) - - -- we must use shadict to get the cache, because the `kong.cache` was hooked by `kong.plugins.pre-function` - local raw_groups, err = ngx.shared.kong_db_cache:get("kong_db_cache"..cache_key) - if raw_groups then + local acl_prefunction_code = ([[ + local ok, err = kong.cache:get(%q) + if ok then ngx.exit(200) else ngx.log(ngx.ERR, "failed to get cache: ", err) ngx.exit(500) end - - ]] + ]]):format(kong.db.acls:cache_key(tostring(consumer2.id))) bp.plugins:insert { route = { id = route14.id }, diff --git a/spec/03-plugins/36-request-transformer/02-access_spec.lua b/spec/03-plugins/36-request-transformer/02-access_spec.lua index f027aaf267aa..0104054f83be 100644 --- a/spec/03-plugins/36-request-transformer/02-access_spec.lua +++ b/spec/03-plugins/36-request-transformer/02-access_spec.lua @@ -1462,7 +1462,7 @@ describe("Plugin: request-transformer(access) [#" .. strategy .. "]", function() end) end) - describe("append ", function() + describe("append", function() it("new header if header does not exists", function() local r = assert(client:send { method = "GET", @@ -1638,7 +1638,7 @@ describe("Plugin: request-transformer(access) [#" .. strategy .. "]", function() end) end) - describe("remove, replace, add and append ", function() + describe("remove, replace, add and append", function() it("removes a header", function() local r = assert(client:send { method = "GET", @@ -2657,7 +2657,7 @@ describe("Plugin: request-transformer(access) [#" .. strategy .. "] untrusted_lu name = "request-transformer", config = { add = { - headers = {"x-added:$(require('inspect')('somestring'))"}, + headers = {"x-added:$(require('ngx.process')('somestring'))"}, } } } @@ -2722,7 +2722,7 @@ describe("Plugin: request-transformer(access) [#" .. strategy .. 
"] untrusted_lu end) it("should fail when template tries to require non whitelisted module from sandbox", function() - local pattern = [[require 'inspect' not allowed within sandbox]] + local pattern = [[require 'ngx.process' not allowed within sandbox]] local start_count = count_log_lines(pattern) local r = assert(client:send { From 9dded314d5cdad3500d7ac0d5b865b0b34a89a0e Mon Sep 17 00:00:00 2001 From: Michael Martin Date: Mon, 16 Dec 2024 09:50:59 -0800 Subject: [PATCH 03/18] fix(dbless): nest unique field errors for child entities (#14017) * fix(dbless): nest unique field errors for child entities This applies the same fix as b8891eb969a00469afb2a70d8a4a605497c1fc40 but to a different code path that checks fields with a unique constraint. * improve docstring --- .../fix-dbless-consumer-credential-error.yml | 3 + kong/db/schema/others/declarative_config.lua | 38 +++-- .../04-admin_api/15-off_spec.lua | 156 ++++++++++++++++++ 3 files changed, 179 insertions(+), 18 deletions(-) create mode 100644 changelog/unreleased/kong/fix-dbless-consumer-credential-error.yml diff --git a/changelog/unreleased/kong/fix-dbless-consumer-credential-error.yml b/changelog/unreleased/kong/fix-dbless-consumer-credential-error.yml new file mode 100644 index 000000000000..b459e8fab9b4 --- /dev/null +++ b/changelog/unreleased/kong/fix-dbless-consumer-credential-error.yml @@ -0,0 +1,3 @@ +message: "Fixed an issue where `POST /config?flatten_errors=1` could not return a proper response if the input contained duplicate consumer credentials." +type: bugfix +scope: Core diff --git a/kong/db/schema/others/declarative_config.lua b/kong/db/schema/others/declarative_config.lua index 7cae8ffe3c64..104fb9174416 100644 --- a/kong/db/schema/others/declarative_config.lua +++ b/kong/db/schema/others/declarative_config.lua @@ -361,6 +361,22 @@ local function uniqueness_error_msg(entity, key, value) "with " .. key .. " set to '" .. value .. "' already declared" end +local function add_error(errs, parent_entity, parent_idx, entity, entity_idx, err) + if parent_entity and parent_idx then + errs[parent_entity] = errs[parent_entity] or {} + errs[parent_entity][parent_idx] = errs[parent_entity][parent_idx] or {} + errs[parent_entity][parent_idx][entity] = errs[parent_entity][parent_idx][entity] or {} + + -- e.g. errs["upstreams"][5]["targets"][2] + errs[parent_entity][parent_idx][entity][entity_idx] = err + + else + errs[entity] = errs[entity] or {} + + -- e.g. 
errs["consumers"][3] + errs[entity][entity_idx] = err + end +end local function populate_references(input, known_entities, by_id, by_key, expected, errs, parent_entity, parent_idx) for _, entity in ipairs(known_entities) do @@ -400,8 +416,8 @@ local function populate_references(input, known_entities, by_id, by_key, expecte if key and key ~= ngx.null then local ok = add_to_by_key(by_key, entity_schema, item, entity, key) if not ok then - errs[entity] = errs[entity] or {} - errs[entity][i] = uniqueness_error_msg(entity, endpoint_key, key) + add_error(errs, parent_entity, parent_idx, entity, i, + uniqueness_error_msg(entity, endpoint_key, key)) failed = true end end @@ -409,22 +425,8 @@ local function populate_references(input, known_entities, by_id, by_key, expecte if item_id then by_id[entity] = by_id[entity] or {} if (not failed) and by_id[entity][item_id] then - local err_t - - if parent_entity and parent_idx then - errs[parent_entity] = errs[parent_entity] or {} - errs[parent_entity][parent_idx] = errs[parent_entity][parent_idx] or {} - errs[parent_entity][parent_idx][entity] = errs[parent_entity][parent_idx][entity] or {} - - -- e.g. errs["upstreams"][5]["targets"] - err_t = errs[parent_entity][parent_idx][entity] - - else - errs[entity] = errs[entity] or {} - err_t = errs[entity] - end - - err_t[i] = uniqueness_error_msg(entity, "primary key", item_id) + add_error(errs, parent_entity, parent_idx, entity, i, + uniqueness_error_msg(entity, "primary key", item_id)) else by_id[entity][item_id] = item diff --git a/spec/02-integration/04-admin_api/15-off_spec.lua b/spec/02-integration/04-admin_api/15-off_spec.lua index 554b445fcedf..288365c6e75b 100644 --- a/spec/02-integration/04-admin_api/15-off_spec.lua +++ b/spec/02-integration/04-admin_api/15-off_spec.lua @@ -2323,6 +2323,162 @@ R6InCcH2Wh8wSeY5AuDXvu2tv9g/PW9wIJmPuKSHMA== }, post_config(input)) end) + it("correctly handles nested entity errors", function() + local consumers = { + { + id = "cdac30ee-cd7e-465c-99b6-84f3e4e17015", + username = "consumer-01", + tags = { "consumer-01" }, + basicauth_credentials = { + { + id = "089091f4-cb8b-48f5-8463-8319097be716", + username = "user-01", password = "pwd", + tags = { "consumer-01-credential-01" }, + }, + { + id = "b1443d61-ccd9-4359-b82a-f37515442295", + username = "user-11", password = "pwd", + tags = { "consumer-01-credential-02" }, + }, + { + id = "2603d010-edbe-4713-94ef-145e281cbf4c", + username = "user-02", password = "pwd", + tags = { "consumer-01-credential-03" }, + }, + { + id = "760cf441-613c-48a2-b377-36aebc9f9ed0", + -- unique violation! + username = "user-11", password = "pwd", + tags = { "consumer-01-credential-04" }, + } + }, + }, + { + id = "c0c021f5-dae1-4031-bcf6-42e3c4d9ced9", + username = "consumer-02", + tags = { "consumer-02" }, + basicauth_credentials = { + { + id = "d0cd1919-bb07-4c85-b407-f33feb74f6e2", + username = "user-99", password = "pwd", + tags = { "consumer-02-credential-01" }, + } + }, + }, + { + id = "9acb0270-73aa-4968-b9e4-a4924e4aced5", + username = "consumer-03", + tags = { "consumer-03" }, + basicauth_credentials = { + { + id = "7e8bcd10-cdcd-41f1-8c4d-9790d34aa67d", + -- unique violation! + username = "user-01", password = "pwd", + tags = { "consumer-03-credential-01" }, + }, + { + id = "7fe186bd-44e5-4b97-854d-85a24929889d", + username = "user-33", password = "pwd", + tags = { "consumer-03-credential-02" }, + }, + { + id = "6547c325-5332-41fc-a954-d4972926cdb5", + -- unique violation! 
+ username = "user-02", password = "pwd", + tags = { "consumer-03-credential-03" }, + }, + { + id = "e091a955-9ee1-4403-8d0a-a7f1f8bafaca", + -- unique violation! + username = "user-33", password = "pwd", + tags = { "consumer-03-credential-04" }, + } + }, + } + } + + local input = { + _format_version = "3.0", + consumers = consumers, + } + + validate({ + -- consumer 1 / credential 4 + { + entity = { + consumer = { id = consumers[1].id }, + id = consumers[1].basicauth_credentials[4].id, + tags = consumers[1].basicauth_credentials[4].tags, + password = "pwd", + username = "user-11" + }, + entity_id = consumers[1].basicauth_credentials[4].id, + entity_tags = consumers[1].basicauth_credentials[4].tags, + entity_type = "basicauth_credential", + errors = { { + message = "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-11' already declared", + type = "entity" + } } + }, + + -- consumer 3 / credential 1 + { + entity = { + consumer = { id = consumers[3].id }, + id = consumers[3].basicauth_credentials[1].id, + tags = consumers[3].basicauth_credentials[1].tags, + password = "pwd", + username = "user-01" + }, + entity_id = consumers[3].basicauth_credentials[1].id, + entity_tags = consumers[3].basicauth_credentials[1].tags, + entity_type = "basicauth_credential", + errors = { { + message = "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-01' already declared", + type = "entity" + } } + }, + + -- consumer 3 / credential 3 + { + entity = { + consumer = { id = consumers[3].id }, + id = consumers[3].basicauth_credentials[3].id, + tags = consumers[3].basicauth_credentials[3].tags, + password = "pwd", + username = "user-02" + }, + entity_id = consumers[3].basicauth_credentials[3].id, + entity_tags = consumers[3].basicauth_credentials[3].tags, + entity_type = "basicauth_credential", + errors = { { + message = "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-02' already declared", + type = "entity" + } } + }, + + -- consumer 3 / credential 4 + { + entity = { + consumer = { id = consumers[3].id }, + id = consumers[3].basicauth_credentials[4].id, + tags = consumers[3].basicauth_credentials[4].tags, + password = "pwd", + username = "user-33" + }, + entity_id = consumers[3].basicauth_credentials[4].id, + entity_tags = consumers[3].basicauth_credentials[4].tags, + entity_type = "basicauth_credential", + errors = { { + message = "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-33' already declared", + type = "entity" + } } + }, + + + }, post_config(input)) + end) + it("preserves IDs from the input", function() local id = "0175e0e8-3de9-56b4-96f1-b12dcb4b6691" local service = { From f10cd09bc4c5f515c72b2987d441dc1d5a389653 Mon Sep 17 00:00:00 2001 From: Michael Martin Date: Mon, 16 Dec 2024 10:09:33 -0800 Subject: [PATCH 04/18] chore(wasm): remove bundled datakit filter (#14013) * chore(wasm): remove bundled datakit filter * change log level --- build/openresty/wasmx/filters/variables.bzl | 12 +----------- changelog/unreleased/kong/bump-datakit.yml | 2 -- changelog/unreleased/kong/remove-datakit.yml | 2 ++ kong/conf_loader/init.lua | 2 +- spec/01-unit/03-conf_loader_spec.lua | 12 ++++-------- spec/02-integration/20-wasm/07-reports_spec.lua | 2 +- 6 files changed, 9 insertions(+), 23 deletions(-) delete mode 100644 changelog/unreleased/kong/bump-datakit.yml create mode 100644 changelog/unreleased/kong/remove-datakit.yml diff --git a/build/openresty/wasmx/filters/variables.bzl 
b/build/openresty/wasmx/filters/variables.bzl index 10d44e52f93f..0afea9a308bd 100644 --- a/build/openresty/wasmx/filters/variables.bzl +++ b/build/openresty/wasmx/filters/variables.bzl @@ -2,17 +2,7 @@ A list of wasm filters. """ -WASM_FILTERS = [ - { - "name": "datakit-filter", - "repo": "Kong/datakit", - "tag": "0.3.1", - "files": { - "datakit.meta.json": "acd16448615ea23315e68d4516edd79135bae13469f7bf9129f7b1139cd2b873", - "datakit.wasm": "c086e6fb36a6ed8c9ff3284805485c7280380469b6a556ccf7e5bc06edce27e7", - }, - }, -] +WASM_FILTERS = [] WASM_FILTERS_TARGETS = [ "@%s-%s//file" % (filter["name"], file) diff --git a/changelog/unreleased/kong/bump-datakit.yml b/changelog/unreleased/kong/bump-datakit.yml deleted file mode 100644 index b3b0eb3bcd98..000000000000 --- a/changelog/unreleased/kong/bump-datakit.yml +++ /dev/null @@ -1,2 +0,0 @@ -message: "Bumped the bundled `datakit` Wasm filter to `0.3.1`" -type: dependency diff --git a/changelog/unreleased/kong/remove-datakit.yml b/changelog/unreleased/kong/remove-datakit.yml new file mode 100644 index 000000000000..4997628b6aa2 --- /dev/null +++ b/changelog/unreleased/kong/remove-datakit.yml @@ -0,0 +1,2 @@ +message: "**Wasm**: Removed the experimental datakit Wasm filter" +type: dependency diff --git a/kong/conf_loader/init.lua b/kong/conf_loader/init.lua index 56fb58278266..a2898a5e51f7 100644 --- a/kong/conf_loader/init.lua +++ b/kong/conf_loader/init.lua @@ -675,7 +675,7 @@ local function load(path, custom_conf, opts) bundled_filter_path = alt_path else - log.warn("Bundled proxy-wasm filters path (%s) does not exist " .. + log.debug("Bundled proxy-wasm filters path (%s) does not exist " .. "or is not a directory. Bundled filters may not be " .. "available", bundled_filter_path) end diff --git a/spec/01-unit/03-conf_loader_spec.lua b/spec/01-unit/03-conf_loader_spec.lua index f0b8e492596c..355637185380 100644 --- a/spec/01-unit/03-conf_loader_spec.lua +++ b/spec/01-unit/03-conf_loader_spec.lua @@ -340,7 +340,7 @@ describe("Configuration loader", function() assert.is_not_nil(conf) assert.is_not_nil(conf.admin_gui_origin) assert.same({ "http://localhost:8002" }, conf.admin_gui_origin) - + conf, _, errors = conf_loader(nil, { admin_gui_url = "http://localhost:8002/manager, https://localhost:8445/manager", }) @@ -2062,12 +2062,7 @@ describe("Configuration loader", function() })) assert(conf.wasm_bundled_filters_path) - bundled_filters = { - { - name = "datakit", - path = conf.wasm_bundled_filters_path .. "/datakit.wasm", - }, - } + bundled_filters = {} end all_filters = {} @@ -2164,7 +2159,8 @@ describe("Configuration loader", function() assert.same(bundled_filters, conf.wasm_modules_parsed) end) - it("prefers user filters to bundled filters when a conflict exists", function() + -- XXX: we don't have any bundled filters to use for this test + pending("prefers user filters to bundled filters when a conflict exists", function() local user_filter = temp_dir .. 
"/datakit.wasm" assert(helpers.file.write(user_filter, "I'm a happy little wasm filter")) finally(function() diff --git a/spec/02-integration/20-wasm/07-reports_spec.lua b/spec/02-integration/20-wasm/07-reports_spec.lua index d62569c7fd4c..80faf3ec209b 100644 --- a/spec/02-integration/20-wasm/07-reports_spec.lua +++ b/spec/02-integration/20-wasm/07-reports_spec.lua @@ -82,7 +82,7 @@ for _, strategy in helpers.each_strategy() do local _, reports_data = assert(reports_server:join()) reports_data = cjson.encode(reports_data) - assert.match("wasm_cnt=3", reports_data) + assert.match("wasm_cnt=2", reports_data) end) it("logs number of requests triggering a Wasm filter", function() From eeded7874b46099b8ea19ca0e768c857a09068d7 Mon Sep 17 00:00:00 2001 From: Chrono Date: Tue, 17 Dec 2024 02:30:26 +0800 Subject: [PATCH 05/18] refactor(clustering/sync): clean the logic of sync_once_impl() (#14024) --- kong/clustering/services/sync/rpc.lua | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/kong/clustering/services/sync/rpc.lua b/kong/clustering/services/sync/rpc.lua index 4100afbb9675..006a8c4bb439 100644 --- a/kong/clustering/services/sync/rpc.lua +++ b/kong/clustering/services/sync/rpc.lua @@ -422,26 +422,28 @@ function sync_once_impl(premature, retry_count) return end - sync_handler() - - local latest_notified_version = ngx.shared.kong:get(CLUSTERING_DATA_PLANES_LATEST_VERSION_KEY) - local current_version = tonumber(declarative.get_current_hash()) or 0 + sync_handler() + local latest_notified_version = ngx.shared.kong:get(CLUSTERING_DATA_PLANES_LATEST_VERSION_KEY) if not latest_notified_version then ngx_log(ngx_DEBUG, "no version notified yet") return end - -- retry if the version is not updated - if current_version < latest_notified_version then - retry_count = retry_count or 0 - if retry_count > MAX_RETRY then - ngx_log(ngx_ERR, "sync_once retry count exceeded. retry_count: ", retry_count) - return - end + local current_version = tonumber(declarative.get_current_hash()) or 0 + if current_version >= latest_notified_version then + ngx_log(ngx_DEBUG, "version already updated") + return + end - return start_sync_once_timer(retry_count + 1) + -- retry if the version is not updated + retry_count = retry_count or 0 + if retry_count > MAX_RETRY then + ngx_log(ngx_ERR, "sync_once retry count exceeded. retry_count: ", retry_count) + return end + + return start_sync_once_timer(retry_count + 1) end From fad17ba3c6d4701a78e784e3cd45a846fddb9f99 Mon Sep 17 00:00:00 2001 From: Xiaochen Wang Date: Tue, 17 Dec 2024 14:10:23 +0800 Subject: [PATCH 06/18] fix(clustering/rpc): support `cluster_use_proxy` option for clustering rpc protocol (#13971) The original hybrid mode connections like full sync (sync v1) support forward proxy via the option `cluster_use_proxy`. While clustering RPC protocol does not support this, this commit introduces this feature to RPC protocol. 
https://konghq.atlassian.net/browse/KAG-5555 --- kong/clustering/rpc/manager.lua | 12 ++++++++++++ kong/clustering/utils.lua | 4 ++-- .../09-hybrid_mode/10-forward-proxy_spec.lua | 15 ++++++++++++--- .../14-dp_privileged_agent_spec.lua | 1 + 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/kong/clustering/rpc/manager.lua b/kong/clustering/rpc/manager.lua index c3925c5073cb..3d08963b4687 100644 --- a/kong/clustering/rpc/manager.lua +++ b/kong/clustering/rpc/manager.lua @@ -30,6 +30,7 @@ local cjson_encode = cjson.encode local cjson_decode = cjson.decode local validate_client_cert = clustering_tls.validate_client_cert local CLUSTERING_PING_INTERVAL = constants.CLUSTERING_PING_INTERVAL +local parse_proxy_url = require("kong.clustering.utils").parse_proxy_url local RPC_MATA_V1 = "kong.meta.v1" @@ -474,6 +475,17 @@ function _M:connect(premature, node_id, host, path, cert, key) local c = assert(client:new(WS_OPTS)) + if self.conf.cluster_use_proxy then + local proxy_opts = parse_proxy_url(self.conf.proxy_server) + opts.proxy_opts = { + wss_proxy = proxy_opts.proxy_url, + wss_proxy_authorization = proxy_opts.proxy_authorization, + } + + ngx_log(ngx_DEBUG, "[rpc] using proxy ", proxy_opts.proxy_url, + " to connect control plane") + end + local ok, err = c:connect(uri, opts) if not ok then ngx_log(ngx_ERR, "[rpc] unable to connect to peer: ", err) diff --git a/kong/clustering/utils.lua b/kong/clustering/utils.lua index 5ee56d30bafc..ee34e7dce2e4 100644 --- a/kong/clustering/utils.lua +++ b/kong/clustering/utils.lua @@ -33,7 +33,7 @@ local CLUSTER_PROXY_SSL_TERMINATOR_SOCK = fmt("unix:%s/%s", local _M = {} -local function parse_proxy_url(proxy_server) +function _M.parse_proxy_url(proxy_server) local ret = {} if proxy_server then @@ -84,7 +84,7 @@ function _M.connect_cp(dp, endpoint, protocols) } if conf.cluster_use_proxy then - local proxy_opts = parse_proxy_url(conf.proxy_server) + local proxy_opts = _M.parse_proxy_url(conf.proxy_server) opts.proxy_opts = { wss_proxy = proxy_opts.proxy_url, wss_proxy_authorization = proxy_opts.proxy_authorization, diff --git a/spec/02-integration/09-hybrid_mode/10-forward-proxy_spec.lua b/spec/02-integration/09-hybrid_mode/10-forward-proxy_spec.lua index a7f11e41059e..27856b4554ee 100644 --- a/spec/02-integration/09-hybrid_mode/10-forward-proxy_spec.lua +++ b/spec/02-integration/09-hybrid_mode/10-forward-proxy_spec.lua @@ -71,11 +71,13 @@ local proxy_configs = { -- if existing lmdb data is set, the service/route exists and -- test run too fast before the proxy connection is established --- XXX FIXME: enable inc_sync = on -for _, inc_sync in ipairs { "off" } do +for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do + local rpc, inc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do for proxy_desc, proxy_opts in pairs(proxy_configs) do - describe("CP/DP sync through proxy (" .. proxy_desc .. ") works with #" .. strategy .. " inc_sync=" .. inc_sync .. " backend", function() + describe("CP/DP sync through proxy (" .. proxy_desc .. ") works with #" + .. strategy .. " rpc=" .. rpc .. " inc_sync=" .. inc_sync + .. 
" backend", function() lazy_setup(function() helpers.get_db_utils(strategy) -- runs migrations @@ -87,6 +89,7 @@ for _, strategy in helpers.each_strategy() do db_update_frequency = 0.1, cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", + cluster_rpc = rpc, cluster_incremental_sync = inc_sync, })) @@ -108,6 +111,7 @@ for _, strategy in helpers.each_strategy() do proxy_server_ssl_verify = proxy_opts.proxy_server_ssl_verify, lua_ssl_trusted_certificate = proxy_opts.lua_ssl_trusted_certificate, + cluster_rpc = rpc, cluster_incremental_sync = inc_sync, -- this is unused, but required for the template to include a stream {} block @@ -166,6 +170,11 @@ for _, strategy in helpers.each_strategy() do if auth_on then assert.matches("accepted basic proxy%-authorization", contents) end + + -- check the debug log of the `cluster_use_proxy` option + local line = inc_sync == "on" and "[rpc] using proxy" or + "[clustering] using proxy" + assert.logfile("servroot2/logs/error.log").has.line(line, true) end) end) end) diff --git a/spec/02-integration/09-hybrid_mode/14-dp_privileged_agent_spec.lua b/spec/02-integration/09-hybrid_mode/14-dp_privileged_agent_spec.lua index b0743edb7467..1c5e351bf874 100644 --- a/spec/02-integration/09-hybrid_mode/14-dp_privileged_agent_spec.lua +++ b/spec/02-integration/09-hybrid_mode/14-dp_privileged_agent_spec.lua @@ -20,6 +20,7 @@ describe("DP diabled Incremental Sync RPC #" .. strategy, function() cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", + cluster_rpc = "on", cluster_incremental_sync = "on", -- ENABLE incremental sync })) From f3f77eb153f1fb247be6ea377cc9e52c9a27fae3 Mon Sep 17 00:00:00 2001 From: Wangchong Zhou Date: Tue, 17 Dec 2024 17:55:44 +0800 Subject: [PATCH 07/18] chore(build): allow github_release to download without extract (#14027) https://konghq.atlassian.net/browse/KAG-6035 --- build/build_system.bzl | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/build/build_system.bzl b/build/build_system.bzl index bfd45d0678a2..dfe2e526a039 100644 --- a/build/build_system.bzl +++ b/build/build_system.bzl @@ -171,6 +171,15 @@ def _copyright_header(ctx): # while writing utf-8 content read by |ctx.read|, let's disable it ctx.file(path, copyright_content_html + content, legacy_utf8 = False) +_GITHUB_RELEASE_SINGLE_FILE_BUILD = """\ +package(default_visibility = ["//visibility:public"]) + +filegroup( + name = "file", + srcs = ["{}"], +) +""" + def _github_release_impl(ctx): ctx.file("WORKSPACE", "workspace(name = \"%s\")\n" % ctx.name) @@ -195,20 +204,25 @@ def _github_release_impl(ctx): fail("Unsupported OS %s" % os_name) gh_bin = "%s" % ctx.path(Label("@gh_%s_%s//:bin/gh" % (os_name, os_arch))) - args = [gh_bin, "release", "download", ctx.attr.tag, "-R", ctx.attr.repo] + args = [gh_bin, "release", "download", ctx.attr.tag, "--repo", ctx.attr.repo] downloaded_file = None if ctx.attr.pattern: if "/" in ctx.attr.pattern or ".." in ctx.attr.pattern: fail("/ and .. are not allowed in pattern") downloaded_file = ctx.attr.pattern.replace("*", "_") - args += ["-p", ctx.attr.pattern] + args += ["--pattern", ctx.attr.pattern] elif ctx.attr.archive: args.append("--archive=" + ctx.attr.archive) downloaded_file = "gh-release." 
+ ctx.attr.archive.split(".")[-1] else: fail("at least one of pattern or archive must be set") - args += ["-O", downloaded_file] + downloaded_file_path = downloaded_file + if not ctx.attr.extract: + ctx.file("file/BUILD", _GITHUB_RELEASE_SINGLE_FILE_BUILD.format(downloaded_file)) + downloaded_file_path = "file/" + downloaded_file + + args += ["--output", downloaded_file_path] ret = ctx.execute(args) @@ -218,10 +232,23 @@ def _github_release_impl(ctx): gh_token_set = "GITHUB_TOKEN is not set, is this a private repo?" fail("Failed to download release (%s): %s, exit: %d" % (gh_token_set, ret.stderr, ret.return_code)) - ctx.extract(downloaded_file, stripPrefix = ctx.attr.strip_prefix) + if ctx.attr.sha256: + if os_name == "macOS": + sha256_cmd = ["shasum", "-a", "256", downloaded_file_path] + else: + sha256_cmd = ["sha256sum", downloaded_file_path] + ret = ctx.execute(sha256_cmd) + checksum = ret.stdout.split(" ")[0] + if checksum != ctx.attr.sha256: + fail("Checksum mismatch: expected %s, got %s" % (ctx.attr.sha256, checksum)) + + if ctx.attr.extract: + ctx.extract(downloaded_file_path, stripPrefix = ctx.attr.strip_prefix) # only used in EE: always skip here in CE if not ctx.attr.skip_add_copyright_header and False: + if not ctx.attr.extract: + fail("Writing copyright header is only supported for extracted archives") _copyright_header(ctx) github_release = repository_rule( @@ -231,11 +258,13 @@ github_release = repository_rule( "tag": attr.string(mandatory = True), "pattern": attr.string(mandatory = False), "archive": attr.string(mandatory = False, values = ["zip", "tar.gz"]), + "extract": attr.bool(default = True, doc = "Whether to extract the downloaded archive"), "strip_prefix": attr.string(default = "", doc = "Strip prefix from downloaded files"), "repo": attr.string(mandatory = True), "build_file": attr.label(allow_single_file = True), "build_file_content": attr.string(), "skip_add_copyright_header": attr.bool(default = False, doc = "Whether to inject COPYRIGHT-HEADER into downloaded files, only required for webuis"), + "sha256": attr.string(mandatory = False), }, ) From 59118e2a046b5c993abb38128bffe12804fc5212 Mon Sep 17 00:00:00 2001 From: Chrono Date: Wed, 18 Dec 2024 09:22:40 +0800 Subject: [PATCH 08/18] refactor(clustering/rpc): rename to cluster_rpc_sync (#14025) https://konghq.atlassian.net/browse/KAG-6036 --- kong/conf_loader/constants.lua | 2 +- kong/conf_loader/init.lua | 6 +-- kong/global.lua | 2 +- kong/init.lua | 10 ++--- kong/pdk/vault.lua | 2 +- kong/runloop/handler.lua | 6 +-- kong/templates/kong_defaults.lua | 2 +- spec/01-unit/01-db/10-declarative_spec.lua | 2 +- .../01-db/11-declarative_lmdb_spec.lua | 4 +- .../02-integration/07-sdk/03-cluster_spec.lua | 10 ++--- .../09-hybrid_mode/01-sync_spec.lua | 44 +++++++++---------- .../09-hybrid_mode/02-start_stop_spec.lua | 32 +++++++------- .../09-hybrid_mode/03-pki_spec.lua | 10 ++--- .../04-cp_cluster_sync_spec.lua | 10 ++--- .../09-hybrid_mode/05-ocsp_spec.lua | 34 +++++++------- .../09-hybrid_mode/08-lazy_export_spec.lua | 24 +++++----- .../09-hybrid_mode/09-config-compat_spec.lua | 8 ++-- .../09-node-id-persistence_spec.lua | 10 ++--- .../09-hybrid_mode/10-forward-proxy_spec.lua | 12 ++--- .../09-hybrid_mode/11-status_spec.lua | 14 +++--- .../09-hybrid_mode/12-errors_spec.lua | 10 ++--- .../09-hybrid_mode/13-deprecations_spec.lua | 10 ++--- .../14-dp_privileged_agent_spec.lua | 6 +-- .../18-hybrid_rpc/01-rpc_spec.lua | 4 +- .../18-hybrid_rpc/04-concentrator_spec.lua | 6 +-- .../19-incrmental_sync/01-sync_spec.lua | 4 +- 
.../02-multiple_dp_nodes_spec.lua | 4 +- .../20-wasm/06-clustering_spec.lua | 16 +++---- .../20-wasm/10-wasmtime_spec.lua | 14 +++--- .../09-key-auth/04-hybrid_mode_spec.lua | 10 ++--- .../11-correlation-id/02-schema_spec.lua | 12 ++--- 31 files changed, 170 insertions(+), 170 deletions(-) diff --git a/kong/conf_loader/constants.lua b/kong/conf_loader/constants.lua index 95fff6f6867a..2e3a27b31b57 100644 --- a/kong/conf_loader/constants.lua +++ b/kong/conf_loader/constants.lua @@ -513,7 +513,7 @@ local CONF_PARSERS = { cluster_use_proxy = { typ = "boolean" }, cluster_dp_labels = { typ = "array" }, cluster_rpc = { typ = "boolean" }, - cluster_incremental_sync = { typ = "boolean" }, + cluster_rpc_sync = { typ = "boolean" }, cluster_full_sync_threshold = { typ = "number" }, cluster_cjson = { typ = "boolean" }, diff --git a/kong/conf_loader/init.lua b/kong/conf_loader/init.lua index a2898a5e51f7..3a32433b3320 100644 --- a/kong/conf_loader/init.lua +++ b/kong/conf_loader/init.lua @@ -1034,10 +1034,10 @@ local function load(path, custom_conf, opts) end end - if conf.cluster_incremental_sync and not conf.cluster_rpc then - log.warn("Cluster incremental sync has been forcibly disabled, " .. + if conf.cluster_rpc_sync and not conf.cluster_rpc then + log.warn("Cluster rpc sync has been forcibly disabled, " .. "please enable cluster RPC.") - conf.cluster_incremental_sync = false + conf.cluster_rpc_sync = false end -- parse and validate pluginserver directives diff --git a/kong/global.lua b/kong/global.lua index 55ef7adfd991..a67e612ff0c4 100644 --- a/kong/global.lua +++ b/kong/global.lua @@ -193,7 +193,7 @@ function _GLOBAL.init_worker_events(kong_config) local enable_privileged_agent = false if kong_config.dedicated_config_processing and kong_config.role == "data_plane" and - not kong.sync -- for incremental sync there is no privileged_agent + not kong.sync -- for rpc sync there is no privileged_agent then enable_privileged_agent = true end diff --git a/kong/init.lua b/kong/init.lua index a4f66a1450ab..731540f80720 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -695,7 +695,7 @@ function Kong.init() if config.cluster_rpc then kong.rpc = require("kong.clustering.rpc.manager").new(config, kong.node.get_id()) - if config.cluster_incremental_sync then + if config.cluster_rpc_sync then kong.sync = require("kong.clustering.services.sync").new(db, is_control_plane(config)) kong.sync:init(kong.rpc) end @@ -885,8 +885,8 @@ function Kong.init_worker() local is_dp_sync_v1 = is_data_plane(kong.configuration) and not kong.sync local using_dedicated = kong.configuration.dedicated_config_processing - -- CP needs to support both full and incremental sync - -- full sync is only enabled for DP if incremental sync is disabled + -- CP needs to support both v1 and v2 sync + -- v1 sync is only enabled for DP if v2 sync is disabled if is_cp or is_dp_sync_v1 then kong.clustering:init_worker() end @@ -992,7 +992,7 @@ function Kong.init_worker() plugin_servers.start() end - -- rpc and incremental sync + -- rpc and sync if is_http_module then -- init rpc connection @@ -1000,7 +1000,7 @@ function Kong.init_worker() kong.rpc:init_worker() end - -- init incremental sync + -- init sync -- should run after rpc init successfully if kong.sync then kong.sync:init_worker() diff --git a/kong/pdk/vault.lua b/kong/pdk/vault.lua index b7a1adff1841..05921a3b9c54 100644 --- a/kong/pdk/vault.lua +++ b/kong/pdk/vault.lua @@ -1444,7 +1444,7 @@ local function new(self) local not_dbless = conf.database ~= "off" -- postgres local 
dp_with_inc_sync = conf.role == "data_plane" and - conf.cluster_incremental_sync + conf.cluster_rpc_sync return not_dbless or dp_with_inc_sync end diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua index 37efc7f8bb78..54eb8cf0ba30 100644 --- a/kong/runloop/handler.lua +++ b/kong/runloop/handler.lua @@ -363,10 +363,10 @@ local function new_router(version) -- like rebuild_router_timer. And it relies on core_cache to detect changes. -- -- 1. stratey off (dbless) - -- incremental_sync on: + -- rpc_sync on: -- non init worker: true(kong.core_cache) -- init worker: false - -- incremental_sync off: false + -- rpc_sync off: false -- 2. strategy on (non dbless): true(kong.core_cache) local detect_changes = kong.core_cache and (db.strategy ~= "off" or (kong.sync and get_phase() ~= "init_worker")) @@ -986,7 +986,7 @@ return { -- start some rebuild timers for -- 1. traditional mode - -- 2. DP with incremental sync on (dbless mode) + -- 2. DP with rpc sync on (dbless mode) if strategy ~= "off" or kong.sync then local worker_state_update_frequency = kong.configuration.worker_state_update_frequency or 1 diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 939626417403..83ef5f95eb3c 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -42,7 +42,7 @@ cluster_max_payload = 16777216 cluster_use_proxy = off cluster_dp_labels = NONE cluster_rpc = off -cluster_incremental_sync = off +cluster_rpc_sync = off cluster_full_sync_threshold = 512 cluster_cjson = off diff --git a/spec/01-unit/01-db/10-declarative_spec.lua b/spec/01-unit/01-db/10-declarative_spec.lua index be683a2df37b..137bebb206e4 100644 --- a/spec/01-unit/01-db/10-declarative_spec.lua +++ b/spec/01-unit/01-db/10-declarative_spec.lua @@ -56,7 +56,7 @@ keyauth_credentials: assert.equals("services|123|fieldname|" .. 
sha256_hex("test"), key) end) - -- since incremental sync the param `unique_across_ws` is useless + -- since rpc sync the param `unique_across_ws` is useless -- this test case is just for compatibility it("does not omits the workspace id when 'unique_across_ws' is 'true'", function() local key = unique_field_key("services", "123", "fieldname", "test", true) diff --git a/spec/01-unit/01-db/11-declarative_lmdb_spec.lua b/spec/01-unit/01-db/11-declarative_lmdb_spec.lua index 6fbe9181c967..047fadae604a 100644 --- a/spec/01-unit/01-db/11-declarative_lmdb_spec.lua +++ b/spec/01-unit/01-db/11-declarative_lmdb_spec.lua @@ -202,7 +202,7 @@ describe("#off preserve nulls", function() local id, item = next(entities.basicauth_credentials) - -- format changed after incremental sync + -- format changed after rpc sync local cache_key = concat({ "basicauth_credentials|", item.ws_id, @@ -225,7 +225,7 @@ describe("#off preserve nulls", function() for _, plugin in pairs(entities.plugins) do if plugin.name == PLUGIN_NAME then - -- format changed after incremental sync + -- format changed after rpc sync cache_key = concat({ "plugins|", plugin.ws_id, diff --git a/spec/02-integration/07-sdk/03-cluster_spec.lua b/spec/02-integration/07-sdk/03-cluster_spec.lua index fadc4d4093d4..2aea775edc0e 100644 --- a/spec/02-integration/07-sdk/03-cluster_spec.lua +++ b/spec/02-integration/07-sdk/03-cluster_spec.lua @@ -41,10 +41,10 @@ fixtures_cp.http_mock.my_server_block = [[ ]] for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do - describe("PDK: kong.cluster for #" .. strategy .. " inc_sync=" .. inc_sync, function() + describe("PDK: kong.cluster for #" .. strategy .. " rpc_sync=" .. rpc_sync, function() local proxy_client lazy_setup(function() @@ -65,7 +65,7 @@ for _, strategy in helpers.each_strategy() do cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }, nil, nil, fixtures_cp)) assert(helpers.start_kong({ @@ -78,7 +78,7 @@ for _, strategy in helpers.each_strategy() do proxy_listen = "0.0.0.0:9002", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }, nil, nil, fixtures_dp)) end) @@ -116,4 +116,4 @@ for _, strategy in helpers.each_strategy() do end) end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/01-sync_spec.lua b/spec/02-integration/09-hybrid_mode/01-sync_spec.lua index 5f96f2738506..bb941bd4ed30 100644 --- a/spec/02-integration/09-hybrid_mode/01-sync_spec.lua +++ b/spec/02-integration/09-hybrid_mode/01-sync_spec.lua @@ -13,11 +13,11 @@ local KEY_AUTH_PLUGIN for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do -describe("CP/DP communication #" .. strategy .. " inc_sync=" .. inc_sync, function() +describe("CP/DP communication #" .. strategy .. " rpc_sync=" .. rpc_sync, function() lazy_setup(function() helpers.get_db_utils(strategy) -- runs migrations @@ -31,7 +31,7 @@ describe("CP/DP communication #" .. strategy .. " inc_sync=" .. 
inc_sync, functi cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -44,7 +44,7 @@ describe("CP/DP communication #" .. strategy .. " inc_sync=" .. inc_sync, functi proxy_listen = "0.0.0.0:9002", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, worker_state_update_frequency = 1, })) @@ -348,7 +348,7 @@ describe("CP/DP communication #" .. strategy .. " inc_sync=" .. inc_sync, functi end) end) -describe("CP/DP #version check #" .. strategy .. " inc_sync=" .. inc_sync, function() +describe("CP/DP #version check #" .. strategy .. " rpc_sync=" .. rpc_sync, function() -- for these tests, we do not need a real DP, but rather use the fake DP -- client so we can mock various values (e.g. node_version) describe("relaxed compatibility check:", function() @@ -368,7 +368,7 @@ describe("CP/DP #version check #" .. strategy .. " inc_sync=" .. inc_sync, funct nginx_conf = "spec/fixtures/custom_nginx.template", cluster_version_check = "major_minor", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) for _, plugin in ipairs(helpers.get_plugins_list()) do @@ -625,7 +625,7 @@ describe("CP/DP #version check #" .. strategy .. " inc_sync=" .. inc_sync, funct end) end) -describe("CP/DP config sync #" .. strategy .. " inc_sync=" .. inc_sync, function() +describe("CP/DP config sync #" .. strategy .. " rpc_sync=" .. rpc_sync, function() lazy_setup(function() helpers.get_db_utils(strategy) -- runs migrations @@ -637,7 +637,7 @@ describe("CP/DP config sync #" .. strategy .. " inc_sync=" .. inc_sync, function db_update_frequency = 3, cluster_listen = "127.0.0.1:9005", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -648,7 +648,7 @@ describe("CP/DP config sync #" .. strategy .. " inc_sync=" .. inc_sync, function cluster_cert_key = "spec/fixtures/kong_clustering.key", cluster_control_plane = "127.0.0.1:9005", proxy_listen = "0.0.0.0:9002", - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, cluster_rpc = rpc, worker_state_update_frequency = 1, })) @@ -754,7 +754,7 @@ describe("CP/DP labels #" .. strategy, function() cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -768,7 +768,7 @@ describe("CP/DP labels #" .. strategy, function() nginx_conf = "spec/fixtures/custom_nginx.template", cluster_dp_labels="deployment:mycloud,region:us-east-1", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -796,8 +796,8 @@ describe("CP/DP labels #" .. strategy, function() assert.equal(CLUSTERING_SYNC_STATUS.NORMAL, v.sync_status) assert.equal(CLUSTERING_SYNC_STATUS.NORMAL, v.sync_status) -- TODO: The API output does include labels and certs when the - -- incremental sync is enabled. - if inc_sync == "off" then + -- rpc sync is enabled. + if rpc_sync == "off" then assert.equal("mycloud", v.labels.deployment) assert.equal("us-east-1", v.labels.region) end @@ -822,7 +822,7 @@ describe("CP/DP cert details(cluster_mtls = shared) #" .. 
strategy, function() cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -836,7 +836,7 @@ describe("CP/DP cert details(cluster_mtls = shared) #" .. strategy, function() nginx_conf = "spec/fixtures/custom_nginx.template", cluster_dp_labels="deployment:mycloud,region:us-east-1", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -860,8 +860,8 @@ describe("CP/DP cert details(cluster_mtls = shared) #" .. strategy, function() for _, v in pairs(json.data) do if v.ip == "127.0.0.1" then -- TODO: The API output does include labels and certs when the - -- incremental sync is enabled. - if inc_sync == "off" then + -- rpc sync is enabled. + if rpc_sync == "off" then assert.equal(1888983905, v.cert_details.expiry_timestamp) end return true @@ -888,7 +888,7 @@ describe("CP/DP cert details(cluster_mtls = pki) #" .. strategy, function() cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/kong_clustering_ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -905,7 +905,7 @@ describe("CP/DP cert details(cluster_mtls = pki) #" .. strategy, function() cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/kong_clustering.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -929,8 +929,8 @@ describe("CP/DP cert details(cluster_mtls = pki) #" .. strategy, function() for _, v in pairs(json.data) do if v.ip == "127.0.0.1" then -- TODO: The API output does include labels and certs when the - -- incremental sync is enabled. - if inc_sync == "off" then + -- rpc sync is enabled. + if rpc_sync == "off" then assert.equal(1897136778, v.cert_details.expiry_timestamp) end return true @@ -942,4 +942,4 @@ describe("CP/DP cert details(cluster_mtls = pki) #" .. strategy, function() end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua b/spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua index c236b1ec1c8b..9b4c4222139a 100644 --- a/spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua +++ b/spec/02-integration/09-hybrid_mode/02-start_stop_spec.lua @@ -2,9 +2,9 @@ local helpers = require "spec.helpers" for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] -describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() +describe("invalid config are rejected" .. " rpc_sync=" .. rpc_sync, function() describe("role is control_plane", function() it("can not disable admin_listen", function() local ok, err = helpers.start_kong({ @@ -15,7 +15,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() cluster_cert_key = "spec/fixtures/kong_clustering.key", admin_listen = "off", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -31,7 +31,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() cluster_cert_key = "spec/fixtures/kong_clustering.key", cluster_listen = "off", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -47,7 +47,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. 
inc_sync, function() cluster_cert_key = "spec/fixtures/kong_clustering.key", database = "off", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -63,7 +63,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() cluster_cert_key = "spec/fixtures/kong_clustering.key", cluster_mtls = "pki", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -81,7 +81,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() cluster_cert_key = "spec/fixtures/kong_clustering.key", proxy_listen = "off", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -96,7 +96,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() cluster_cert = "spec/fixtures/kong_clustering.crt", cluster_cert_key = "spec/fixtures/kong_clustering.key", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -114,7 +114,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() cluster_cert_key = "spec/fixtures/kong_clustering.key", cluster_dp_labels = "w@:_a", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -132,7 +132,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() proxy_listen = "0.0.0.0:" .. helpers.get_available_port(), cluster_dp_labels = "Aa-._zZ_key:Aa-._zZ_val", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.True(ok) helpers.stop_kong("servroot2") @@ -148,7 +148,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() database = param[2], prefix = "servroot2", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -163,7 +163,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() prefix = "servroot2", cluster_cert = "spec/fixtures/kong_clustering.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) assert.False(ok) @@ -174,7 +174,7 @@ describe("invalid config are rejected" .. " inc_sync=" .. inc_sync, function() end) -- note that lagacy modes still error when CP exits -describe("when CP exits before DP" .. " inc_sync=" .. inc_sync, function() +describe("when CP exits before DP" .. " rpc_sync=" .. rpc_sync, function() local need_exit = true lazy_setup(function() @@ -188,7 +188,7 @@ describe("when CP exits before DP" .. " inc_sync=" .. inc_sync, function() cluster_cert_key = "spec/fixtures/kong_clustering.key", cluster_listen = "127.0.0.1:9005", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ role = "data_plane", @@ -199,7 +199,7 @@ describe("when CP exits before DP" .. " inc_sync=" .. inc_sync, function() proxy_listen = "0.0.0.0:9002", database = "off", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, -- EE [[ -- vitals uses the clustering strategy by default, and it logs the exact -- same "error while receiving frame from peer" error strings that this @@ -223,4 +223,4 @@ describe("when CP exits before DP" .. " inc_sync=" .. 
inc_sync, function() assert.logfile("servroot2/logs/error.log").has.no.line("error while receiving frame from peer", true) end) end) -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/03-pki_spec.lua b/spec/02-integration/09-hybrid_mode/03-pki_spec.lua index adb53b801c3e..90c182e6d140 100644 --- a/spec/02-integration/09-hybrid_mode/03-pki_spec.lua +++ b/spec/02-integration/09-hybrid_mode/03-pki_spec.lua @@ -3,11 +3,11 @@ local cjson = require "cjson.safe" for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do -describe("CP/DP PKI sync #" .. strategy .. " inc_sync=" .. inc_sync, function() +describe("CP/DP PKI sync #" .. strategy .. " rpc_sync=" .. rpc_sync, function() lazy_setup(function() helpers.get_db_utils(strategy, { @@ -29,7 +29,7 @@ describe("CP/DP PKI sync #" .. strategy .. " inc_sync=" .. inc_sync, function() cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/kong_clustering_ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -46,7 +46,7 @@ describe("CP/DP PKI sync #" .. strategy .. " inc_sync=" .. inc_sync, function() cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/kong_clustering.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, worker_state_update_frequency = 1, })) end) @@ -167,4 +167,4 @@ describe("CP/DP PKI sync #" .. strategy .. " inc_sync=" .. inc_sync, function() end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/04-cp_cluster_sync_spec.lua b/spec/02-integration/09-hybrid_mode/04-cp_cluster_sync_spec.lua index 17d836fc9792..5a47069db05d 100644 --- a/spec/02-integration/09-hybrid_mode/04-cp_cluster_sync_spec.lua +++ b/spec/02-integration/09-hybrid_mode/04-cp_cluster_sync_spec.lua @@ -21,10 +21,10 @@ end for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do - describe("CP/CP sync works with #" .. strategy .. " inc_sync=" .. inc_sync .. " backend", function() + describe("CP/CP sync works with #" .. strategy .. " rpc_sync=" .. rpc_sync .. 
" backend", function() lazy_setup(function() helpers.get_db_utils(strategy, { "routes", "services" }) @@ -39,7 +39,7 @@ for _, strategy in helpers.each_strategy() do cluster_cert_key = "spec/fixtures/kong_clustering.key", database = strategy, cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -53,7 +53,7 @@ for _, strategy in helpers.each_strategy() do cluster_cert_key = "spec/fixtures/kong_clustering.key", database = strategy, cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -86,4 +86,4 @@ for _, strategy in helpers.each_strategy() do end) end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/05-ocsp_spec.lua b/spec/02-integration/09-hybrid_mode/05-ocsp_spec.lua index ee6b50d3b34b..26824085c674 100644 --- a/spec/02-integration/09-hybrid_mode/05-ocsp_spec.lua +++ b/spec/02-integration/09-hybrid_mode/05-ocsp_spec.lua @@ -15,11 +15,11 @@ end for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do -describe("cluster_ocsp = on works #" .. strategy .. " inc_sync=" .. inc_sync, function() +describe("cluster_ocsp = on works #" .. strategy .. " rpc_sync=" .. rpc_sync, function() describe("DP certificate good", function() lazy_setup(function() helpers.get_db_utils(strategy, { @@ -44,7 +44,7 @@ describe("cluster_ocsp = on works #" .. strategy .. " inc_sync=" .. inc_sync, fu cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) set_ocsp_status("good") @@ -63,7 +63,7 @@ describe("cluster_ocsp = on works #" .. strategy .. " inc_sync=" .. inc_sync, fu cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -118,7 +118,7 @@ describe("cluster_ocsp = on works #" .. strategy .. " inc_sync=" .. inc_sync, fu cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) set_ocsp_status("revoked") @@ -137,7 +137,7 @@ describe("cluster_ocsp = on works #" .. strategy .. " inc_sync=" .. inc_sync, fu cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -190,7 +190,7 @@ describe("cluster_ocsp = on works #" .. strategy .. " inc_sync=" .. inc_sync, fu cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) set_ocsp_status("error") @@ -209,7 +209,7 @@ describe("cluster_ocsp = on works #" .. strategy .. " inc_sync=" .. inc_sync, fu cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -240,7 +240,7 @@ describe("cluster_ocsp = on works #" .. strategy .. " inc_sync=" .. inc_sync, fu end) end) -describe("cluster_ocsp = off works with #" .. strategy .. " inc_sync=" .. inc_sync .. " backend", function() +describe("cluster_ocsp = off works with #" .. 
strategy .. " rpc_sync=" .. rpc_sync .. " backend", function() describe("DP certificate revoked, not checking for OCSP", function() lazy_setup(function() helpers.get_db_utils(strategy, { @@ -265,7 +265,7 @@ describe("cluster_ocsp = off works with #" .. strategy .. " inc_sync=" .. inc_sy cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) set_ocsp_status("revoked") @@ -284,7 +284,7 @@ describe("cluster_ocsp = off works with #" .. strategy .. " inc_sync=" .. inc_sy cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -316,7 +316,7 @@ describe("cluster_ocsp = off works with #" .. strategy .. " inc_sync=" .. inc_sy end) end) -describe("cluster_ocsp = optional works with #" .. strategy .. " inc_sync=" .. inc_sync .. " backend", function() +describe("cluster_ocsp = optional works with #" .. strategy .. " rpc_sync=" .. rpc_sync .. " backend", function() describe("DP certificate revoked", function() lazy_setup(function() helpers.get_db_utils(strategy, { @@ -341,7 +341,7 @@ describe("cluster_ocsp = optional works with #" .. strategy .. " inc_sync=" .. i cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) set_ocsp_status("revoked") @@ -360,7 +360,7 @@ describe("cluster_ocsp = optional works with #" .. strategy .. " inc_sync=" .. i cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -413,7 +413,7 @@ describe("cluster_ocsp = optional works with #" .. strategy .. " inc_sync=" .. i cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) set_ocsp_status("error") @@ -432,7 +432,7 @@ describe("cluster_ocsp = optional works with #" .. strategy .. " inc_sync=" .. i cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -468,4 +468,4 @@ describe("cluster_ocsp = optional works with #" .. strategy .. " inc_sync=" .. 
i end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/08-lazy_export_spec.lua b/spec/02-integration/09-hybrid_mode/08-lazy_export_spec.lua index bbeb3524842f..bc235385c6b5 100644 --- a/spec/02-integration/09-hybrid_mode/08-lazy_export_spec.lua +++ b/spec/02-integration/09-hybrid_mode/08-lazy_export_spec.lua @@ -2,7 +2,7 @@ local helpers = require "spec.helpers" local admin_client -local function cp(strategy, rpc, inc_sync) +local function cp(strategy, rpc, rpc_sync) helpers.get_db_utils(strategy) -- make sure the DB is fresh n' clean assert(helpers.start_kong({ role = "control_plane", @@ -15,7 +15,7 @@ local function cp(strategy, rpc, inc_sync) cluster_mtls = "pki", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) admin_client = assert(helpers.admin_client()) end @@ -36,7 +36,7 @@ local function touch_config() })) end -local function json_dp(rpc, inc_sync) +local function json_dp(rpc, rpc_sync) assert(helpers.start_kong({ role = "data_plane", database = "off", @@ -50,27 +50,27 @@ local function json_dp(rpc, inc_sync) cluster_server_name = "kong_clustering", cluster_ca_cert = "spec/fixtures/ocsp_certs/ca.crt", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do -describe("lazy_export with #".. strategy .. " inc_sync=" .. inc_sync, function() +describe("lazy_export with #".. strategy .. " rpc_sync=" .. rpc_sync, function() describe("no DP", function () setup(function() - cp(strategy, rpc, inc_sync) + cp(strategy, rpc, rpc_sync) end) teardown(function () helpers.stop_kong() end) it("test", function () touch_config() - if inc_sync == "on" then + if rpc_sync == "on" then assert.logfile().has.no.line("[kong.sync.v2] config push (connected client)", true) else @@ -81,8 +81,8 @@ describe("lazy_export with #".. strategy .. " inc_sync=" .. inc_sync, function() describe("only json DP", function() setup(function() - cp(strategy, rpc, inc_sync) - json_dp(rpc, inc_sync) + cp(strategy, rpc, rpc_sync) + json_dp(rpc, rpc_sync) end) teardown(function () helpers.stop_kong("dp1") @@ -91,7 +91,7 @@ describe("lazy_export with #".. strategy .. " inc_sync=" .. inc_sync, function() it("test", function () touch_config() - if inc_sync == "on" then + if rpc_sync == "on" then assert.logfile().has.line("[kong.sync.v2] config push (connected client)", true) assert.logfile().has.line("[kong.sync.v2] database is empty or too far behind for node_id", true) @@ -105,4 +105,4 @@ describe("lazy_export with #".. strategy .. " inc_sync=" .. 
inc_sync, function() end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/09-config-compat_spec.lua b/spec/02-integration/09-hybrid_mode/09-config-compat_spec.lua index 8c2b26fba41a..79a73e5ec500 100644 --- a/spec/02-integration/09-hybrid_mode/09-config-compat_spec.lua +++ b/spec/02-integration/09-hybrid_mode/09-config-compat_spec.lua @@ -76,8 +76,8 @@ local function get_sync_status(id) end --- XXX TODO: helpers.clustering_client supports incremental sync -for _, inc_sync in ipairs { "off" } do +-- XXX TODO: helpers.clustering_client supports rpc sync +for _, rpc_sync in ipairs { "off" } do for _, strategy in helpers.each_strategy() do describe("CP/DP config compat transformations #" .. strategy, function() @@ -103,7 +103,7 @@ describe("CP/DP config compat transformations #" .. strategy, function() cluster_listen = CP_HOST .. ":" .. CP_PORT, nginx_conf = "spec/fixtures/custom_nginx.template", plugins = "bundled", - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -1201,4 +1201,4 @@ describe("CP/DP config compat transformations #" .. strategy, function() end) end -- each strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/09-node-id-persistence_spec.lua b/spec/02-integration/09-hybrid_mode/09-node-id-persistence_spec.lua index 28fa82074e07..7b358f629202 100644 --- a/spec/02-integration/09-hybrid_mode/09-node-id-persistence_spec.lua +++ b/spec/02-integration/09-hybrid_mode/09-node-id-persistence_spec.lua @@ -84,10 +84,10 @@ end for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do - describe("node id persistence " .. " inc_sync=" .. inc_sync, function() + describe("node id persistence " .. " rpc_sync=" .. rpc_sync, function() local control_plane_config = { role = "control_plane", @@ -97,7 +97,7 @@ for _, strategy in helpers.each_strategy() do cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, } local data_plane_config = { @@ -113,7 +113,7 @@ for _, strategy in helpers.each_strategy() do untrusted_lua = "on", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, worker_state_update_frequency = 1, } @@ -331,4 +331,4 @@ for _, strategy in helpers.each_strategy() do end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/10-forward-proxy_spec.lua b/spec/02-integration/09-hybrid_mode/10-forward-proxy_spec.lua index 27856b4554ee..80f30902aa5f 100644 --- a/spec/02-integration/09-hybrid_mode/10-forward-proxy_spec.lua +++ b/spec/02-integration/09-hybrid_mode/10-forward-proxy_spec.lua @@ -72,11 +72,11 @@ local proxy_configs = { -- test run too fast before the proxy connection is established for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do for proxy_desc, proxy_opts in pairs(proxy_configs) do describe("CP/DP sync through proxy (" .. proxy_desc .. ") works with #" - .. strategy .. " rpc=" .. rpc .. " inc_sync=" .. inc_sync + .. strategy .. " rpc=" .. rpc .. " rpc_sync=" .. rpc_sync .. 
" backend", function() lazy_setup(function() helpers.get_db_utils(strategy) -- runs migrations @@ -90,7 +90,7 @@ for _, strategy in helpers.each_strategy() do cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -112,7 +112,7 @@ for _, strategy in helpers.each_strategy() do lua_ssl_trusted_certificate = proxy_opts.lua_ssl_trusted_certificate, cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, -- this is unused, but required for the template to include a stream {} block stream_listen = "0.0.0.0:5555", @@ -172,7 +172,7 @@ for _, strategy in helpers.each_strategy() do end -- check the debug log of the `cluster_use_proxy` option - local line = inc_sync == "on" and "[rpc] using proxy" or + local line = rpc_sync == "on" and "[rpc] using proxy" or "[clustering] using proxy" assert.logfile("servroot2/logs/error.log").has.line(line, true) end) @@ -181,4 +181,4 @@ for _, strategy in helpers.each_strategy() do end -- proxy configs end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/11-status_spec.lua b/spec/02-integration/09-hybrid_mode/11-status_spec.lua index c6ada743ee1d..86cd89418191 100644 --- a/spec/02-integration/09-hybrid_mode/11-status_spec.lua +++ b/spec/02-integration/09-hybrid_mode/11-status_spec.lua @@ -5,11 +5,11 @@ local cp_status_port = helpers.get_available_port() local dp_status_port = 8100 for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy() do - describe("Hybrid Mode - status ready #" .. strategy .. " inc_sync=" .. inc_sync, function() + describe("Hybrid Mode - status ready #" .. strategy .. " rpc_sync=" .. rpc_sync, function() helpers.get_db_utils(strategy, {}) @@ -25,7 +25,7 @@ for _, strategy in helpers.each_strategy() do nginx_main_worker_processes = 8, status_listen = "127.0.0.1:" .. dp_status_port, cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) end @@ -40,7 +40,7 @@ for _, strategy in helpers.each_strategy() do nginx_conf = "spec/fixtures/custom_nginx.template", status_listen = "127.0.0.1:" .. 
cp_status_port, cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }) end @@ -75,7 +75,7 @@ for _, strategy in helpers.each_strategy() do describe("dp status ready endpoint for no config", function() -- XXX FIXME - local skip_inc_sync = inc_sync == "on" and pending or it + local skip_rpc_sync = rpc_sync == "on" and pending or it lazy_setup(function() assert(start_kong_cp()) @@ -108,7 +108,7 @@ for _, strategy in helpers.each_strategy() do -- now dp receive config from cp, so dp should be ready - skip_inc_sync("should return 200 on data plane after configuring", function() + skip_rpc_sync("should return 200 on data plane after configuring", function() helpers.wait_until(function() local http_client = helpers.http_client('127.0.0.1', dp_status_port) @@ -166,4 +166,4 @@ for _, strategy in helpers.each_strategy() do end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/12-errors_spec.lua b/spec/02-integration/09-hybrid_mode/12-errors_spec.lua index fbbc3049cd55..52b3147a6ae4 100644 --- a/spec/02-integration/09-hybrid_mode/12-errors_spec.lua +++ b/spec/02-integration/09-hybrid_mode/12-errors_spec.lua @@ -69,10 +69,10 @@ local function get_error_report(client, msg) end --- XXX TODO: mock_cp does not support incremental sync rpc -for _, inc_sync in ipairs { "off" } do +-- XXX TODO: mock_cp does not support rpc sync +for _, rpc_sync in ipairs { "off" } do for _, strategy in helpers.each_strategy() do - describe("CP/DP sync error-reporting with #" .. strategy .. " inc_sync=" .. inc_sync .. " backend", function() + describe("CP/DP sync error-reporting with #" .. strategy .. " rpc_sync=" .. rpc_sync .. " backend", function() local client local cluster_port local cluster_ssl_port @@ -102,7 +102,7 @@ for _, strategy in helpers.each_strategy() do -- use a small map size so that it's easy for us to max it out lmdb_map_size = "1m", plugins = "bundled,cluster-error-reporting", - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, }, nil, nil, fixtures)) end) @@ -260,4 +260,4 @@ for _, strategy in helpers.each_strategy() do end) end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/13-deprecations_spec.lua b/spec/02-integration/09-hybrid_mode/13-deprecations_spec.lua index fcb8bf6bbca7..a1d824cf72b3 100644 --- a/spec/02-integration/09-hybrid_mode/13-deprecations_spec.lua +++ b/spec/02-integration/09-hybrid_mode/13-deprecations_spec.lua @@ -4,10 +4,10 @@ local join = require("pl.stringx").join local ENABLED_PLUGINS = { "dummy" , "reconfiguration-completion"} for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy({"postgres"}) do - describe("deprecations are not reported on DP but on CP " .. " inc_sync=" .. inc_sync, function() + describe("deprecations are not reported on DP but on CP " .. " rpc_sync=" ..
rpc_sync, function() local cp_prefix = "servroot1" local dp_prefix = "servroot2" local cp_logfile, dp_logfile, route @@ -45,7 +45,7 @@ for _, strategy in helpers.each_strategy({"postgres"}) do admin_listen = "0.0.0.0:9001", proxy_listen = "off", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -61,7 +61,7 @@ for _, strategy in helpers.each_strategy({"postgres"}) do admin_listen = "off", proxy_listen = "0.0.0.0:9002", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) dp_logfile = helpers.get_running_conf(dp_prefix).nginx_err_logs cp_logfile = helpers.get_running_conf(cp_prefix).nginx_err_logs @@ -119,4 +119,4 @@ for _, strategy in helpers.each_strategy({"postgres"}) do end) end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/09-hybrid_mode/14-dp_privileged_agent_spec.lua b/spec/02-integration/09-hybrid_mode/14-dp_privileged_agent_spec.lua index 1c5e351bf874..d78ec1469e24 100644 --- a/spec/02-integration/09-hybrid_mode/14-dp_privileged_agent_spec.lua +++ b/spec/02-integration/09-hybrid_mode/14-dp_privileged_agent_spec.lua @@ -5,7 +5,7 @@ local CLUSTERING_SYNC_STATUS = require("kong.constants").CLUSTERING_SYNC_STATUS for _, dedicated in ipairs { "on", "off" } do for _, strategy in helpers.each_strategy() do -describe("DP diabled Incremental Sync RPC #" .. strategy, function() +describe("DP disabled Sync RPC #" .. strategy, function() lazy_setup(function() helpers.get_db_utils(strategy, { @@ -21,7 +21,7 @@ describe("DP diabled Incremental Sync RPC #" .. strategy, function() nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = "on", - cluster_incremental_sync = "on", -- ENABLE incremental sync + cluster_rpc_sync = "on", -- ENABLE rpc sync })) assert(helpers.start_kong({ @@ -36,7 +36,7 @@ describe("DP diabled Incremental Sync RPC #" ..
strategy, function() nginx_worker_processes = 2, -- multiple workers cluster_rpc = "off", -- DISABLE rpc - cluster_incremental_sync = "off", -- DISABLE incremental sync + cluster_rpc_sync = "off", -- DISABLE rpc sync dedicated_config_processing = dedicated, -- privileged agent })) diff --git a/spec/02-integration/18-hybrid_rpc/01-rpc_spec.lua b/spec/02-integration/18-hybrid_rpc/01-rpc_spec.lua index 8f670a0388e1..218b28b62501 100644 --- a/spec/02-integration/18-hybrid_rpc/01-rpc_spec.lua +++ b/spec/02-integration/18-hybrid_rpc/01-rpc_spec.lua @@ -20,7 +20,7 @@ for _, strategy in helpers.each_strategy() do nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = "on", plugins = "bundled,rpc-hello-test", - cluster_incremental_sync = "off", + cluster_rpc_sync = "off", })) assert(helpers.start_kong({ @@ -34,7 +34,7 @@ for _, strategy in helpers.each_strategy() do nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = "on", plugins = "bundled,rpc-hello-test", - cluster_incremental_sync = "off", + cluster_rpc_sync = "off", })) end) diff --git a/spec/02-integration/18-hybrid_rpc/04-concentrator_spec.lua b/spec/02-integration/18-hybrid_rpc/04-concentrator_spec.lua index 445bcee6ec12..9986d5f4e798 100644 --- a/spec/02-integration/18-hybrid_rpc/04-concentrator_spec.lua +++ b/spec/02-integration/18-hybrid_rpc/04-concentrator_spec.lua @@ -47,7 +47,7 @@ for _, strategy in helpers.each_strategy() do nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = "on", plugins = "bundled,rpc-hello-test", - cluster_incremental_sync = "off", + cluster_rpc_sync = "off", })) assert(helpers.start_kong({ @@ -60,7 +60,7 @@ for _, strategy in helpers.each_strategy() do nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = "on", plugins = "bundled,rpc-hello-test", - cluster_incremental_sync = "off", + cluster_rpc_sync = "off", })) assert(helpers.start_kong({ @@ -74,7 +74,7 @@ for _, strategy in helpers.each_strategy() do nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = "on", plugins = "bundled,rpc-hello-test", - cluster_incremental_sync = "off", + cluster_rpc_sync = "off", })) end) diff --git a/spec/02-integration/19-incrmental_sync/01-sync_spec.lua b/spec/02-integration/19-incrmental_sync/01-sync_spec.lua index abb969d2a0fc..a608e6432edb 100644 --- a/spec/02-integration/19-incrmental_sync/01-sync_spec.lua +++ b/spec/02-integration/19-incrmental_sync/01-sync_spec.lua @@ -36,7 +36,7 @@ describe("Incremental Sync RPC #" .. strategy, function() cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = "on", - cluster_incremental_sync = "on", -- incremental sync + cluster_rpc_sync = "on", -- rpc sync })) assert(helpers.start_kong({ @@ -50,7 +50,7 @@ describe("Incremental Sync RPC #" .. strategy, function() nginx_conf = "spec/fixtures/custom_nginx.template", nginx_worker_processes = 4, -- multiple workers cluster_rpc = "on", - cluster_incremental_sync = "on", -- incremental sync + cluster_rpc_sync = "on", -- rpc sync worker_state_update_frequency = 1, })) end) diff --git a/spec/02-integration/19-incrmental_sync/02-multiple_dp_nodes_spec.lua b/spec/02-integration/19-incrmental_sync/02-multiple_dp_nodes_spec.lua index 8567c03fba26..fe7f89432a5a 100644 --- a/spec/02-integration/19-incrmental_sync/02-multiple_dp_nodes_spec.lua +++ b/spec/02-integration/19-incrmental_sync/02-multiple_dp_nodes_spec.lua @@ -10,7 +10,7 @@ local function start_cp(strategy, port) cluster_listen = "127.0.0.1:" .. 
port, nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = "on", - cluster_incremental_sync = "on", -- incremental sync + cluster_rpc_sync = "on", -- rpc sync })) end @@ -26,7 +26,7 @@ local function start_dp(prefix, port) nginx_conf = "spec/fixtures/custom_nginx.template", nginx_worker_processes = 4, -- multiple workers cluster_rpc = "on", - cluster_incremental_sync = "on", -- incremental sync + cluster_rpc_sync = "on", -- rpc sync worker_state_update_frequency = 1, })) end diff --git a/spec/02-integration/20-wasm/06-clustering_spec.lua b/spec/02-integration/20-wasm/06-clustering_spec.lua index 4d5a63e323d9..540331bcc67e 100644 --- a/spec/02-integration/20-wasm/06-clustering_spec.lua +++ b/spec/02-integration/20-wasm/06-clustering_spec.lua @@ -72,9 +72,9 @@ local function new_wasm_filter_directory() end --- XXX TODO: enable inc_sync = "on" -for _, inc_sync in ipairs { "off" } do -describe("#wasm - hybrid mode #postgres" .. " inc_sync=" .. inc_sync, function() +-- XXX TODO: enable rpc_sync = "on" +for _, rpc_sync in ipairs { "off" } do +describe("#wasm - hybrid mode #postgres" .. " rpc_sync=" .. rpc_sync, function() local cp_prefix = "cp" local cp_errlog = cp_prefix .. "/logs/error.log" local cp_filter_path @@ -115,7 +115,7 @@ describe("#wasm - hybrid mode #postgres" .. " inc_sync=" .. inc_sync, function() wasm_filters = "user", -- don't enable bundled filters for this test wasm_filters_path = cp_filter_path, nginx_main_worker_processes = 2, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert.logfile(cp_errlog).has.line([[successfully loaded "response_transformer" module]], true, 10) @@ -155,7 +155,7 @@ describe("#wasm - hybrid mode #postgres" .. " inc_sync=" .. inc_sync, function() wasm_filters_path = dp_filter_path, node_id = node_id, nginx_main_worker_processes = 2, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert.logfile(dp_errlog).has.line([[successfully loaded "response_transformer" module]], true, 10) @@ -311,7 +311,7 @@ describe("#wasm - hybrid mode #postgres" .. " inc_sync=" .. inc_sync, function() nginx_conf = "spec/fixtures/custom_nginx.template", wasm = "off", node_id = node_id, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -351,7 +351,7 @@ describe("#wasm - hybrid mode #postgres" .. " inc_sync=" .. inc_sync, function() wasm_filters = "user", -- don't enable bundled filters for this test wasm_filters_path = tmp_dir, node_id = node_id, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -370,4 +370,4 @@ describe("#wasm - hybrid mode #postgres" .. " inc_sync=" .. inc_sync, function() end) end) end) -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/02-integration/20-wasm/10-wasmtime_spec.lua b/spec/02-integration/20-wasm/10-wasmtime_spec.lua index 60a5bed93d86..44f595c7c6db 100644 --- a/spec/02-integration/20-wasm/10-wasmtime_spec.lua +++ b/spec/02-integration/20-wasm/10-wasmtime_spec.lua @@ -2,7 +2,7 @@ local helpers = require "spec.helpers" local fmt = string.format for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, role in ipairs({"traditional", "control_plane", "data_plane"}) do @@ -22,11 +22,11 @@ describe("#wasm wasmtime (role: " .. role .. 
") (#postgres, #db)", function() cluster_cert = "spec/fixtures/kong_clustering.crt", cluster_cert_key = "spec/fixtures/kong_clustering.key", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) conf = assert(helpers.get_running_conf(prefix)) - conf.cluster_incremental_sync = inc_sync == "on" + conf.cluster_rpc_sync = rpc_sync == "on" end) lazy_teardown(function() @@ -98,11 +98,11 @@ describe("#wasm wasmtime (role: " .. role .. ") (#postgres, #db)", function() nginx_main_worker_processes = 2, cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) conf = assert(helpers.get_running_conf(prefix)) - conf.cluster_incremental_sync = inc_sync == "on" + conf.cluster_rpc_sync = rpc_sync == "on" -- we need to briefly spin up a control plane, or else we will get -- error.log entries when our data plane tries to connect @@ -121,7 +121,7 @@ describe("#wasm wasmtime (role: " .. role .. ") (#postgres, #db)", function() status_listen = "off", nginx_main_worker_processes = 2, cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end end) @@ -179,4 +179,4 @@ describe("#wasm wasmtime (role: " .. role .. ") (#postgres, #db)", function() end) -- wasmtime end -- each role -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/03-plugins/09-key-auth/04-hybrid_mode_spec.lua b/spec/03-plugins/09-key-auth/04-hybrid_mode_spec.lua index 9c353732a6f9..66f59e40b13f 100644 --- a/spec/03-plugins/09-key-auth/04-hybrid_mode_spec.lua +++ b/spec/03-plugins/09-key-auth/04-hybrid_mode_spec.lua @@ -2,10 +2,10 @@ local helpers = require "spec.helpers" for _, v in ipairs({ {"off", "off"}, {"on", "off"}, {"on", "on"}, }) do - local rpc, inc_sync = v[1], v[2] + local rpc, rpc_sync = v[1], v[2] for _, strategy in helpers.each_strategy({"postgres"}) do - describe("Plugin: key-auth (access) [#" .. strategy .. " inc_sync=" .. inc_sync .. "] auto-expiring keys", function() + describe("Plugin: key-auth (access) [#" .. strategy .. " rpc_sync=" .. rpc_sync .. "] auto-expiring keys", function() -- Give a bit of time to reduce test flakyness on slow setups local ttl = 10 local inserted_at @@ -43,7 +43,7 @@ for _, strategy in helpers.each_strategy({"postgres"}) do cluster_telemetry_listen = "127.0.0.1:9006", nginx_conf = "spec/fixtures/custom_nginx.template", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -57,7 +57,7 @@ for _, strategy in helpers.each_strategy({"postgres"}) do cluster_telemetry_endpoint = "127.0.0.1:9006", proxy_listen = "0.0.0.0:9002", cluster_rpc = rpc, - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -129,4 +129,4 @@ for _, strategy in helpers.each_strategy({"postgres"}) do end) end) end -- for _, strategy -end -- for inc_sync +end -- for rpc_sync diff --git a/spec/03-plugins/11-correlation-id/02-schema_spec.lua b/spec/03-plugins/11-correlation-id/02-schema_spec.lua index b02cc906505f..e5aa7c3035e5 100644 --- a/spec/03-plugins/11-correlation-id/02-schema_spec.lua +++ b/spec/03-plugins/11-correlation-id/02-schema_spec.lua @@ -86,9 +86,9 @@ describe("Plugin: correlation-id (schema) #a [#" .. strategy .."]", function() end) end) - --- XXX FIXME: enable inc_sync = on - for _, inc_sync in ipairs { "off" } do - describe("in hybrid mode" .. " inc_sync=" .. inc_sync, function() + --- XXX FIXME: enable rpc_sync = on + for _, rpc_sync in ipairs { "off" } do + describe("in hybrid mode" .. " rpc_sync=" .. 
rpc_sync, function() local route lazy_setup(function() route = bp.routes:insert({ @@ -124,7 +124,7 @@ describe("Plugin: correlation-id (schema) #a [#" .. strategy .."]", function() prefix = "servroot", cluster_listen = "127.0.0.1:9005", nginx_conf = "spec/fixtures/custom_nginx.template", - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) assert(helpers.start_kong({ @@ -136,7 +136,7 @@ describe("Plugin: correlation-id (schema) #a [#" .. strategy .."]", function() cluster_control_plane = "127.0.0.1:9005", proxy_listen = "0.0.0.0:9002", status_listen = "127.0.0.1:9100", - cluster_incremental_sync = inc_sync, + cluster_rpc_sync = rpc_sync, })) end) @@ -186,5 +186,5 @@ describe("Plugin: correlation-id (schema) #a [#" .. strategy .."]", function() proxy_client:close() end) end) - end -- for inc_sync + end -- for rpc_sync end) From be7e3567e459d2df6c9252eb4332fca46887ab81 Mon Sep 17 00:00:00 2001 From: Chrono Date: Wed, 18 Dec 2024 14:14:19 +0800 Subject: [PATCH 09/18] feat(clustering/rpc): support jsonrpc notification (#13948) https://konghq.atlassian.net/browse/KAG-5893 --- kong/clustering/rpc/concentrator.lua | 19 ++++++-- kong/clustering/rpc/future.lua | 27 ++++++++--- kong/clustering/rpc/manager.lua | 68 ++++++++++++++++++---------- kong/clustering/rpc/socket.lua | 25 ++++++++-- 4 files changed, 102 insertions(+), 37 deletions(-) diff --git a/kong/clustering/rpc/concentrator.lua b/kong/clustering/rpc/concentrator.lua index 68bb0bc33880..80d19cad769d 100644 --- a/kong/clustering/rpc/concentrator.lua +++ b/kong/clustering/rpc/concentrator.lua @@ -154,7 +154,14 @@ function _M:_event_loop(lconn) "unknown requester for RPC") local res, err = self.manager:_local_call(target_id, payload.method, - payload.params) + payload.params, not payload.id) + + -- notification has no callback or id + if not payload.id then + ngx_log(ngx_DEBUG, "[rpc] notification has no response") + goto continue + end + if res then -- call success res, err = self:_enqueue_rpc_response(reply_to, { @@ -180,6 +187,8 @@ function _M:_event_loop(lconn) ngx_log(ngx_WARN, "[rpc] unable to enqueue RPC error: ", err) end end + + ::continue:: end end end @@ -287,9 +296,13 @@ end -- This way the manager code wouldn't tell the difference -- between calls made over WebSocket or concentrator function _M:call(node_id, method, params, callback) - local id = self:_get_next_id() + local id - self.interest[id] = callback + -- notification has no callback or id + if callback then + id = self:_get_next_id() + self.interest[id] = callback + end return self:_enqueue_rpc_request(node_id, { jsonrpc = jsonrpc.VERSION, diff --git a/kong/clustering/rpc/future.lua b/kong/clustering/rpc/future.lua index 68ed82720f09..ee91ed9e54fc 100644 --- a/kong/clustering/rpc/future.lua +++ b/kong/clustering/rpc/future.lua @@ -12,25 +12,36 @@ local STATE_SUCCEED = 3 local STATE_ERRORED = 4 -function _M.new(node_id, socket, method, params) +function _M.new(node_id, socket, method, params, is_notification) local self = { method = method, params = params, - sema = semaphore.new(), socket = socket, node_id = node_id, - id = nil, - result = nil, - error = nil, - state = STATE_NEW, -- STATE_* + is_notification = is_notification, } + if not is_notification then + self.id = nil + self.result = nil + self.error = nil + self.state = STATE_NEW -- STATE_* + self.sema = semaphore.new() + end + return setmetatable(self, _MT) end -- start executing the future function _M:start() + -- notification has no callback + if self.is_notification then + return 
self.socket:call(self.node_id, + self.method, + self.params) + end + assert(self.state == STATE_NEW) self.state = STATE_IN_PROGRESS @@ -60,6 +71,10 @@ end function _M:wait(timeout) + if self.is_notification then + return nil, "a notification cannot be waited on" + end + assert(self.state == STATE_IN_PROGRESS) local res, err = self.sema:wait(timeout) diff --git a/kong/clustering/rpc/manager.lua b/kong/clustering/rpc/manager.lua index 3d08963b4687..ea5c4f5a2822 100644 --- a/kong/clustering/rpc/manager.lua +++ b/kong/clustering/rpc/manager.lua @@ -33,6 +33,7 @@ local CLUSTERING_PING_INTERVAL = constants.CLUSTERING_PING_INTERVAL local parse_proxy_url = require("kong.clustering.utils").parse_proxy_url +local _log_prefix = "[rpc] " local RPC_MATA_V1 = "kong.meta.v1" local RPC_SNAPPY_FRAMED = "x-snappy-framed" @@ -276,7 +277,7 @@ end -- low level helper used internally by :call() and concentrator -- this one does not consider forwarding using concentrator -- when node does not exist -function _M:_local_call(node_id, method, params) +function _M:_local_call(node_id, method, params, is_notification) if not self.client_capabilities[node_id] then return nil, "node is not connected, node_id: " .. node_id end @@ -289,9 +290,14 @@ function _M:_local_call(node_id, method, params) local s = next(self.clients[node_id]) -- TODO: better LB? - local fut = future.new(node_id, s, method, params) + local fut = future.new(node_id, s, method, params, is_notification) assert(fut:start()) + -- a notification need not wait for a response + if is_notification then + return true + end + local ok, err = fut:wait(5) if err then return nil, err @@ -305,9 +311,7 @@ function _M:_local_call(node_id, method, params) end --- public interface, try call on node_id locally first, --- if node is not connected, try concentrator next -function _M:call(node_id, method, ...) +function _M:_call_or_notify(is_notification, node_id, method, ...) local cap = utils.parse_method_name(method) local res, err = self:_find_node_and_check_capability(node_id, cap) @@ -318,20 +322,22 @@ function _M:call(node_id, method, ...) local params = {...} ngx_log(ngx_DEBUG, - "[rpc] calling ", method, + _log_prefix, + is_notification and "notifying " or "calling ", + method, "(node_id: ", node_id, ")", " via ", res == "local" and "local" or "concentrator" ) if res == "local" then - res, err = self:_local_call(node_id, method, params) + res, err = self:_local_call(node_id, method, params, is_notification) if not res then - ngx_log(ngx_DEBUG, "[rpc] ", method, " failed, err: ", err) + ngx_log(ngx_DEBUG, _log_prefix, method, " failed, err: ", err) return nil, err end - ngx_log(ngx_DEBUG, "[rpc] ", method, " succeeded") + ngx_log(ngx_DEBUG, _log_prefix, method, " succeeded") return res end @@ -339,29 +345,45 @@ function _M:call(node_id, method, ...)
assert(res == "concentrator") -- try concentrator - local fut = future.new(node_id, self.concentrator, method, params) + local fut = future.new(node_id, self.concentrator, method, params, is_notification) assert(fut:start()) + if is_notification then + return true + end + local ok, err = fut:wait(5) if err then - ngx_log(ngx_DEBUG, "[rpc] ", method, " failed, err: ", err) + ngx_log(ngx_DEBUG, _log_prefix, method, " failed, err: ", err) return nil, err end if ok then - ngx_log(ngx_DEBUG, "[rpc] ", method, " succeeded") + ngx_log(ngx_DEBUG, _log_prefix, method, " succeeded") return fut.result end - ngx_log(ngx_DEBUG, "[rpc] ", method, " failed, err: ", fut.error.message) + ngx_log(ngx_DEBUG, _log_prefix, method, " failed, err: ", fut.error.message) return nil, fut.error.message end +-- public interface, try call on node_id locally first, +-- if node is not connected, try concentrator next +function _M:call(node_id, method, ...) + return self:_call_or_notify(false, node_id, method, ...) +end + + +function _M:notify(node_id, method, ...) + return self:_call_or_notify(true, node_id, method, ...) +end + + -- handle incoming client connections function _M:handle_websocket() local rpc_protocol = ngx_var.http_sec_websocket_protocol @@ -379,7 +401,7 @@ function _M:handle_websocket() end if not meta_v1_supported then - ngx_log(ngx_ERR, "[rpc] unknown RPC protocol: " .. + ngx_log(ngx_ERR, _log_prefix, "unknown RPC protocol: " .. tostring(rpc_protocol) .. ", doesn't know how to communicate with client") return ngx_exit(ngx.HTTP_CLOSE) @@ -387,7 +409,7 @@ function _M:handle_websocket() local cert, err = validate_client_cert(self.conf, self.cluster_cert, ngx_var.ssl_client_raw_cert) if not cert then - ngx_log(ngx_ERR, "[rpc] client's certificate failed validation: ", err) + ngx_log(ngx_ERR, _log_prefix, "client's certificate failed validation: ", err) return ngx_exit(ngx.HTTP_CLOSE) end @@ -396,14 +418,14 @@ function _M:handle_websocket() local wb, err = server:new(WS_OPTS) if not wb then - ngx_log(ngx_ERR, "[rpc] unable to establish WebSocket connection with client: ", err) + ngx_log(ngx_ERR, _log_prefix, "unable to establish WebSocket connection with client: ", err) return ngx_exit(ngx.HTTP_CLOSE) end -- if timeout (default is 5s) we will close the connection local node_id, err = self:_handle_meta_call(wb) if not node_id then - ngx_log(ngx_ERR, "[rpc] unable to handshake with client: ", err) + ngx_log(ngx_ERR, _log_prefix, "unable to handshake with client: ", err) return ngx_exit(ngx.HTTP_CLOSE) end @@ -415,7 +437,7 @@ function _M:handle_websocket() self:_remove_socket(s) if not res then - ngx_log(ngx_ERR, "[rpc] RPC connection broken: ", err, " node_id: ", node_id) + ngx_log(ngx_ERR, _log_prefix, "RPC connection broken: ", err, " node_id: ", node_id) return ngx_exit(ngx.ERROR) end @@ -488,7 +510,7 @@ function _M:connect(premature, node_id, host, path, cert, key) local ok, err = c:connect(uri, opts) if not ok then - ngx_log(ngx_ERR, "[rpc] unable to connect to peer: ", err) + ngx_log(ngx_ERR, _log_prefix, "unable to connect to peer: ", err) goto err end @@ -497,7 +519,7 @@ function _M:connect(premature, node_id, host, path, cert, key) -- FIXME: resp_headers should not be case sensitive if not resp_headers or not resp_headers["sec_websocket_protocol"] then - ngx_log(ngx_ERR, "[rpc] peer did not provide sec_websocket_protocol, node_id: ", node_id) + ngx_log(ngx_ERR, _log_prefix, "peer did not provide sec_websocket_protocol, node_id: ", node_id) c:send_close() -- can't do much if this fails goto err end 
@@ -506,7 +528,7 @@ function _M:connect(premature, node_id, host, path, cert, key) local meta_cap = resp_headers["sec_websocket_protocol"] if meta_cap ~= RPC_MATA_V1 then - ngx_log(ngx_ERR, "[rpc] did not support protocol : ", meta_cap) + ngx_log(ngx_ERR, _log_prefix, "did not support protocol : ", meta_cap) c:send_close() -- can't do much if this fails goto err end @@ -514,7 +536,7 @@ function _M:connect(premature, node_id, host, path, cert, key) -- if timeout (default is 5s) we will close the connection local ok, err = self:_meta_call(c, meta_cap, node_id) if not ok then - ngx_log(ngx_ERR, "[rpc] unable to handshake with server, node_id: ", node_id, + ngx_log(ngx_ERR, _log_prefix, "unable to handshake with server, node_id: ", node_id, " err: ", err) c:send_close() -- can't do much if this fails goto err @@ -529,7 +551,7 @@ function _M:connect(premature, node_id, host, path, cert, key) self:_remove_socket(s) if not ok then - ngx_log(ngx_ERR, "[rpc] connection to node_id: ", node_id, " broken, err: ", + ngx_log(ngx_ERR, _log_prefix, "connection to node_id: ", node_id, " broken, err: ", err, ", reconnecting in ", reconnection_delay, " seconds") end end diff --git a/kong/clustering/rpc/socket.lua b/kong/clustering/rpc/socket.lua index 045ca8c75577..2044acf170a3 100644 --- a/kong/clustering/rpc/socket.lua +++ b/kong/clustering/rpc/socket.lua @@ -68,6 +68,11 @@ function _M._dispatch(premature, self, cb, payload) if not res then ngx_log(ngx_WARN, "[rpc] RPC callback failed: ", err) + -- notification has no response + if not payload.id then + return + end + res, err = self.outgoing:push(new_error(payload.id, jsonrpc.SERVER_ERROR, err)) if not res then @@ -77,6 +82,12 @@ function _M._dispatch(premature, self, cb, payload) return end + -- notification has no response + if not payload.id then + ngx_log(ngx_DEBUG, "[rpc] notification has no response") + return + end + -- success res, err = self.outgoing:push({ jsonrpc = jsonrpc.VERSION, @@ -151,7 +162,7 @@ function _M:start() ngx_log(ngx_DEBUG, "[rpc] got RPC call: ", payload.method, " (id: ", payload.id, ")") local dispatch_cb = self.manager.callbacks.callbacks[payload.method] - if not dispatch_cb then + if not dispatch_cb and payload.id then local res, err = self.outgoing:push(new_error(payload.id, jsonrpc.METHOD_NOT_FOUND)) if not res then return nil, "unable to send \"METHOD_NOT_FOUND\" error back to client: " .. err @@ -162,9 +173,9 @@ function _M:start() -- call dispatch local res, err = kong.timer:named_at(string_format("JSON-RPC callback for node_id: %s, id: %d, method: %s", - self.node_id, payload.id, payload.method), + self.node_id, payload.id or 0, payload.method), 0, _M._dispatch, self, dispatch_cb, payload) - if not res then + if not res and payload.id then local reso, erro = self.outgoing:push(new_error(payload.id, jsonrpc.INTERNAL_ERROR)) if not reso then return nil, "unable to send \"INTERNAL_ERROR\" error back to client: " .. 
erro @@ -271,9 +282,13 @@ end function _M:call(node_id, method, params, callback) assert(node_id == self.node_id) - local id = self:_get_next_id() + local id - self.interest[id] = callback + -- notification has no callback or id + if callback then + id = self:_get_next_id() + self.interest[id] = callback + end return self.outgoing:push({ jsonrpc = jsonrpc.VERSION, From 33a43c702c81c0ff0e6090f6f95faec30a65a5b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Enrique=20Garc=C3=ADa=20Cota?= Date: Wed, 18 Dec 2024 10:41:09 +0100 Subject: [PATCH 10/18] docs(release): generate 3.9.0 changelog (#13935) (#14031) * docs(release): generate 3.9.0 changelog (#13935) * docs(release): generate 3.9.0 changelog * docs(changelog): apply suggestions for 3.9.0 changelog * chore(wasm): remove bundled datakit filter (#14012) * chore(wasm): remove bundled datakit filter * change log level * docs(changelog): update 3.9.0 changelog (#14016) --------- Co-authored-by: Michael Martin Co-authored-by: Andy Zhang --- CHANGELOG.md | 437 ++++++++++++++---- changelog/3.9.0/3.9.0.md | 275 +++++++++++ changelog/3.9.0/kong-manager/.gitkeep | 0 .../kong-manager/hide-plugin-scoping.yml | 3 + .../3.9.0/kong-manager/ui-improvements.yml | 22 + .../kong-manager/unified-redirection.yml | 3 + changelog/3.9.0/kong/.gitkeep | 0 .../kong/add-noble-numbat.yml | 0 .../kong/add_multiple_domain_for_gui.yml | 0 .../ai-anthropic-fix-function-calling.yml | 0 .../kong/ai-bedrock-fix-function-calling.yml | 0 .../kong/ai-bedrock-fix-guardrails.yml | 0 .../kong/ai-cohere-fix-function-calling.yml | 0 .../kong/ai-gemini-blocks-content-safety.yml | 0 .../kong/ai-gemini-fix-function-calling.yml | 0 .../ai-gemini-fix-transformer-plugins.yml | 0 .../ai-transformers-bad-error-handling.yml | 0 .../kong/bump-dockerfile-ubi9.yml | 0 .../kong/bump-lua-kong-nginx-module.yml | 0 .../kong/bump-lua-resty-aws.yml | 0 .../kong/bump-lua-resty-events.yml | 0 .../kong/bump-lua-resty-ljsonschema.yml | 0 .../kong/bump-lua-resty-lmdb-2.yml | 0 .../kong/bump-lua-resty-lmdb.yml | 0 .../kong/bump-ngx-wasm-module.yml | 0 .../kong/bump-prometheus-latency-bucket.yml | 0 .../kong/bump-wasmtime.yml | 0 .../kong/bump_openssl.yml | 0 .../kong/chore-clustering-log-level.yml | 0 .../{unreleased => 3.9.0}/kong/cp-dp-rpc.yml | 0 .../kong/deprecate_node_id.yml | 0 .../kong/feat-add-ada.yml | 0 .../kong/feat-add-huggingface-llm-driver.yml | 0 .../kong/feat-ai-proxy-disable-h2-alpn.yml | 0 .../kong/feat-api-yaml-media-type.yml | 0 .../kong/feat-correlation-id-order.yml | 0 .../kong/feat-disable-h2-alpn.yml | 0 .../kong/feat-kong-drain-cmd.yml | 0 .../kong/feat-pdk-clear-query-arg.yml | 0 ...ger-finer-resolution-and-total-latency.yml | 0 .../kong/feat-tracing-pdk-attributes.yml | 0 .../kong/fix-admin-api-for-empty-tags.yml | 0 .../kong/fix-ai-proxy-multi-modal-azure.yml | 0 .../kong/fix-ai-semantic-cache-model.yml | 0 ...fix-aws-lambda-multi-value-header-null.yml | 0 .../kong/fix-balancer-health-checker.yml | 0 ...ore-pass-ctx-to-log-init-worker-errors.yml | 0 ...x-jwt-plugin-rsa-public-key-b64decoded.yml | 0 .../kong/fix-key-auth-retain-query-order.yml | 0 .../kong/fix-loggly-hostname-notfound.yml | 0 ...-request-api-for-balancer-body-refresh.yml | 0 .../kong/fix-parse-nested-parameters.yml | 0 .../kong/fix-pdk-inspect-notice.yml | 0 .../kong/fix-plugin-conf-ws-id.yml | 0 .../kong/fix-retries-error-message.yml | 0 ...alues-mistaken-in-rate-limiting-plugin.yml | 0 .../kong/fix-rl-plugin-resp-hdr.yml | 0 .../fix-schema-validation-with-nil-field.yml | 0 .../kong/fix-vault-array-config.yml | 0 
.../kong/fix-vault-cache-workspace-id.yml | 0 .../kong/fix-vault-stream-subsystem.yml | 0 .../kong/fix-wasm-check-missing-filters.yml | 0 .../kong/plugins-redirect.yml | 0 .../kong/prometheus-wasmx-metrics.yml | 0 changelog/3.9.0/kong/remove-datakit.yml | 2 + ...vert-http2-limitation-buffered-request.yml | 0 .../kong/wasm-filter-plugins.yml | 0 67 files changed, 642 insertions(+), 100 deletions(-) create mode 100644 changelog/3.9.0/3.9.0.md create mode 100644 changelog/3.9.0/kong-manager/.gitkeep create mode 100644 changelog/3.9.0/kong-manager/hide-plugin-scoping.yml create mode 100644 changelog/3.9.0/kong-manager/ui-improvements.yml create mode 100644 changelog/3.9.0/kong-manager/unified-redirection.yml create mode 100644 changelog/3.9.0/kong/.gitkeep rename changelog/{unreleased => 3.9.0}/kong/add-noble-numbat.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/add_multiple_domain_for_gui.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/ai-anthropic-fix-function-calling.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/ai-bedrock-fix-function-calling.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/ai-bedrock-fix-guardrails.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/ai-cohere-fix-function-calling.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/ai-gemini-blocks-content-safety.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/ai-gemini-fix-function-calling.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/ai-gemini-fix-transformer-plugins.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/ai-transformers-bad-error-handling.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-dockerfile-ubi9.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-lua-kong-nginx-module.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-lua-resty-aws.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-lua-resty-events.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-lua-resty-ljsonschema.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-lua-resty-lmdb-2.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-lua-resty-lmdb.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-ngx-wasm-module.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-prometheus-latency-bucket.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump-wasmtime.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/bump_openssl.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/chore-clustering-log-level.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/cp-dp-rpc.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/deprecate_node_id.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-add-ada.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-add-huggingface-llm-driver.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-ai-proxy-disable-h2-alpn.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-api-yaml-media-type.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-correlation-id-order.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-disable-h2-alpn.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-kong-drain-cmd.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-pdk-clear-query-arg.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-request-debguger-finer-resolution-and-total-latency.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/feat-tracing-pdk-attributes.yml (100%) rename changelog/{unreleased => 
3.9.0}/kong/fix-admin-api-for-empty-tags.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-ai-proxy-multi-modal-azure.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-ai-semantic-cache-model.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-aws-lambda-multi-value-header-null.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-balancer-health-checker.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-core-pass-ctx-to-log-init-worker-errors.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-jwt-plugin-rsa-public-key-b64decoded.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-key-auth-retain-query-order.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-loggly-hostname-notfound.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-ngx-balancer-recreate-request-api-for-balancer-body-refresh.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-parse-nested-parameters.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-pdk-inspect-notice.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-plugin-conf-ws-id.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-retries-error-message.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-return-values-mistaken-in-rate-limiting-plugin.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-rl-plugin-resp-hdr.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-schema-validation-with-nil-field.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-vault-array-config.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-vault-cache-workspace-id.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-vault-stream-subsystem.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/fix-wasm-check-missing-filters.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/plugins-redirect.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/prometheus-wasmx-metrics.yml (100%) create mode 100644 changelog/3.9.0/kong/remove-datakit.yml rename changelog/{unreleased => 3.9.0}/kong/revert-http2-limitation-buffered-request.yml (100%) rename changelog/{unreleased => 3.9.0}/kong/wasm-filter-plugins.yml (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bc3d8e14742..9792a790ac74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Table of Contents +- [3.9.0](#390) - [3.8.1](#381) - [3.8.0](#380) - [3.7.1](#371) @@ -21,6 +22,248 @@ Individual unreleased changelog entries can be located at [changelog/unreleased](changelog/unreleased). They will be assembled into [CHANGELOG.md](CHANGELOG.md) once released. +## 3.9.0 + +### Kong + +#### Deprecations +##### Core + +- `node_id` in configuration has been deprecated. + [#13687](https://github.com/Kong/kong/issues/13687) + +#### Dependencies +##### Core + +- Bumped lua-kong-nginx-module from 0.11.0 to 0.11.1 to fix an issue where the upstream cert chain wasn't properly set. + [#12752](https://github.com/Kong/kong/issues/12752) + +- Bumped lua-resty-events to 0.3.1. Optimized memory usage. + [#13097](https://github.com/Kong/kong/issues/13097) + +- Bumped lua-resty-lmdb to 1.6.0, allowing `page_size` to be 1. + [#13908](https://github.com/Kong/kong/issues/13908) + +- Bumped lua-resty-lmdb to 1.5.0. Added a `page_size` parameter to allow overriding the page size from the caller side. + [#12786](https://github.com/Kong/kong/issues/12786) + +##### Default + +- Kong Gateway now supports Ubuntu 24.04 (Noble Numbat) with both open-source and Enterprise packages.
+ [#13626](https://github.com/Kong/kong/issues/13626) + +- Bumped the RPM Dockerfile default base image from UBI 8 to UBI 9 + [#13574](https://github.com/Kong/kong/issues/13574) + +- Bumped lua-resty-aws to 1.5.4 to fix a bug inside region prefix generation. + [#12846](https://github.com/Kong/kong/issues/12846) + +- Bumped lua-resty-ljsonschema to 1.2.0, adding support for `null` as a valid option in `enum` types and proper calculation of UTF-8 string length instead of byte count + [#13783](https://github.com/Kong/kong/issues/13783) + +- Bumped `ngx_wasm_module` to `9136e463a6f1d80755ce66c88c3ddecd0eb5e25d` + [#12011](https://github.com/Kong/kong/issues/12011) + + +- Bumped `Wasmtime` version to `26.0.0` + [#12011](https://github.com/Kong/kong/issues/12011) + +- Bumped OpenSSL to 3.2.3 to fix unbounded memory growth with session handling in TLSv1.3 and other CVEs. + [#13448](https://github.com/Kong/kong/issues/13448) + +- **Wasm**: Removed the experimental datakit Wasm filter + [#14012](https://github.com/Kong/kong/issues/14012) + +#### Features +##### CLI Command +- Added the `kong drain` CLI command to make the `/status/ready` endpoint return a `503 Service Unavailable` response. + [#13838](https://github.com/Kong/kong/issues/13838) +##### Core + +- Added a new feature for Kong Manager that supports multiple domains, enabling dynamic cross-origin access for Admin API requests. + [#13664](https://github.com/Kong/kong/issues/13664) + +- Added an ADA dependency: WHATWG-compliant and fast URL parser. + [#13120](https://github.com/Kong/kong/issues/13120) + +- Added a new LLM driver for interfacing with the Hugging Face inference API. +The driver supports both serverless and dedicated LLM instances hosted by +Hugging Face for conversational and text generation tasks. + [#13484](https://github.com/Kong/kong/issues/13484) + + +- Increased the priority order of the correlation-id plugin from 1 to 100001 so that the plugin can be used +with other plugins, especially custom auth plugins. + [#13581](https://github.com/Kong/kong/issues/13581) + +- Added a `tls.disable_http2_alpn()` function patch for disabling HTTP/2 ALPN when performing a TLS handshake. + [#13709](https://github.com/Kong/kong/issues/13709) + + +- Improved the output of the request debugger: + - The resolution of field `total_time` is now in microseconds. + - A new field, `total_time_without_upstream`, shows the latency introduced only by Kong. + [#13460](https://github.com/Kong/kong/issues/13460) +- **proxy-wasm**: Added support for Wasm filters to be configured via the `/plugins` Admin API. + [#13843](https://github.com/Kong/kong/issues/13843) +##### PDK + +- Added `kong.service.request.clear_query_arg(name)` to PDK. + [#13619](https://github.com/Kong/kong/issues/13619) + +- Array and Map type span attributes are now supported by the tracing PDK + [#13818](https://github.com/Kong/kong/issues/13818) +##### Plugin +- **Prometheus**: Increased the upper limit of `KONG_LATENCY_BUCKETS` to 6000 to enhance latency tracking precision. + [#13588](https://github.com/Kong/kong/issues/13588) + +- **ai-proxy**: Disabled HTTP/2 ALPN handshake for connections on routes configured with AI-proxy. + [#13735](https://github.com/Kong/kong/issues/13735) + +- **Redirect**: Added a new plugin to redirect requests to another location. + [#13900](https://github.com/Kong/kong/issues/13900) + + +- **Prometheus**: Added support for Proxy-Wasm metrics.
+ [#13681](https://github.com/Kong/kong/issues/13681) + +##### Admin API +- **Admin API**: Added support for official YAML media-type (`application/yaml`) to the `/config` endpoint. + [#13713](https://github.com/Kong/kong/issues/13713) +##### Clustering + +- Added a remote procedure call (RPC) framework for Hybrid mode deployments. + [#12320](https://github.com/Kong/kong/issues/12320) + +#### Fixes +##### Core + +- Fixed an issue where the `ngx.balancer.recreate_request` API did not refresh the body buffer when `ngx.req.set_body_data` is used in the balancer phase. + [#13882](https://github.com/Kong/kong/issues/13882) + +- Fixed Kong to always pass `ngx.ctx` to `log_init_worker_errors`, as otherwise it may crash at runtime. + [#13731](https://github.com/Kong/kong/issues/13731) + +- Fixed an issue where the workspace ID was not included in the plugin config in the plugins iterator. + [#13377](https://github.com/Kong/kong/issues/13377) + +- Fixed an issue where the workspace ID was not included in the plugin config in the plugins iterator. + [#13872](https://github.com/Kong/kong/issues/13872) + +- Fixed a 500 error triggered by unhandled nil fields during schema validation. + [#13861](https://github.com/Kong/kong/issues/13861) + +- **Vault**: Fixed an issue where array-like configuration fields couldn't contain vault references. + [#13953](https://github.com/Kong/kong/issues/13953) + +- **Vault**: Fixed an issue where updating a vault entity in a non-default workspace wouldn't take effect. + [#13610](https://github.com/Kong/kong/issues/13610) + +- **Vault**: Fixed an issue where a vault reference in the Kong configuration couldn't be dereferenced when both HTTP and stream subsystems were enabled. + [#13953](https://github.com/Kong/kong/issues/13953) + +- **proxy-wasm:** Added a check that prevents Kong from starting when the +database contains invalid Wasm filters. + [#13764](https://github.com/Kong/kong/issues/13764) + +- Fixed an issue where `kong.request.enable_buffering` couldn't be used when the downstream used HTTP/2. + [#13614](https://github.com/Kong/kong/issues/13614) +##### PDK + +- Aligned the `kong.log.inspect` function to log at the `notice` level, as documented + [#13642](https://github.com/Kong/kong/issues/13642) + +- Fixed the error message for an invalid retries variable + [#13605](https://github.com/Kong/kong/issues/13605) + +##### Plugin + +- **ai-proxy**: Fixed a bug where tools (function) calls to Anthropic would return empty results. + [#13760](https://github.com/Kong/kong/issues/13760) + + +- **ai-proxy**: Fixed a bug where tools (function) calls to Bedrock would return empty results. + [#13760](https://github.com/Kong/kong/issues/13760) + + +- **ai-proxy**: Fixed a bug where Bedrock Guardrail config was ignored. + [#13760](https://github.com/Kong/kong/issues/13760) + + +- **ai-proxy**: Fixed a bug where tools (function) calls to Cohere would return empty results. + [#13760](https://github.com/Kong/kong/issues/13760) + + +- **ai-proxy**: Fixed a bug where the Gemini provider would return an error if content safety failed in AI Proxy. + [#13760](https://github.com/Kong/kong/issues/13760) + + +- **ai-proxy**: Fixed a bug where tools (function) calls to Gemini (or via Vertex) would return empty results. + [#13760](https://github.com/Kong/kong/issues/13760) + + +- **ai-proxy**: Fixed an issue where AI Transformer plugins always returned a 404 error when using 'Google One' Gemini subscriptions.
+  [#13703](https://github.com/Kong/kong/issues/13703)
+
+
+- **ai-transformers**: Fixed a bug where the correct LLM error message was not propagated to the caller.
+  [#13703](https://github.com/Kong/kong/issues/13703)
+
+- **AI-Proxy**: Fixed an issue where multi-modal requests were blocked on the Azure AI provider.
+  [#13702](https://github.com/Kong/kong/issues/13702)
+
+
+- Fixed a bug where AI Semantic Cache couldn't use request-provided models.
+  [#13627](https://github.com/Kong/kong/issues/13627)
+
+- **AWS-Lambda**: Fixed an issue in proxy integration mode that caused an internal server error when `multiValueHeaders` was null.
+  [#13533](https://github.com/Kong/kong/issues/13533)
+
+- **jwt**: Ensured `rsa_public_key` isn't base64-decoded.
+  [#13717](https://github.com/Kong/kong/issues/13717)
+
+- **key-auth**: Fixed an issue with the order of query arguments, ensuring that arguments retain their order when hiding the credentials.
+  [#13619](https://github.com/Kong/kong/issues/13619)
+
+- **rate-limiting**: Fixed a bug where the returned values from `get_redis_connection()` were incorrect.
+  [#13613](https://github.com/Kong/kong/issues/13613)
+
+- **rate-limiting**: Fixed an issue that caused an HTTP 500 error when `hide_client_headers` was set to `true` and the request exceeded the rate limit.
+  [#13722](https://github.com/Kong/kong/issues/13722)
+##### Admin API
+
+- Fixed querying Admin API entities with empty tags.
+  [#13723](https://github.com/Kong/kong/issues/13723)
+
+- Fixed an issue where nested parameters couldn't be parsed correctly when using `form-urlencoded` requests.
+  [#13668](https://github.com/Kong/kong/issues/13668)
+##### Clustering
+
+- **Clustering**: Adjusted error log levels for control plane connections.
+  [#13863](https://github.com/Kong/kong/issues/13863)
+##### Default
+
+- **Loggly**: Fixed an issue where a missing `/bin/hostname` caused an error warning on startup.
+  [#13788](https://github.com/Kong/kong/issues/13788)
+
+### Kong-Manager
+
+#### Fixes
+##### Default
+
+- Kong Manager will now hide the scope change field when creating/editing a scoped plugin from another entity.
+  [#297](https://github.com/Kong/kong-manager/issues/297)
+
+
+- Improved the user experience in Kong Manager by fixing various UI-related issues.
+  [#277](https://github.com/Kong/kong-manager/issues/277) [#283](https://github.com/Kong/kong-manager/issues/283) [#286](https://github.com/Kong/kong-manager/issues/286) [#287](https://github.com/Kong/kong-manager/issues/287) [#288](https://github.com/Kong/kong-manager/issues/288) [#291](https://github.com/Kong/kong-manager/issues/291) [#293](https://github.com/Kong/kong-manager/issues/293) [#295](https://github.com/Kong/kong-manager/issues/295) [#298](https://github.com/Kong/kong-manager/issues/298) [#302](https://github.com/Kong/kong-manager/issues/302) [#304](https://github.com/Kong/kong-manager/issues/304) [#306](https://github.com/Kong/kong-manager/issues/306) [#309](https://github.com/Kong/kong-manager/issues/309) [#317](https://github.com/Kong/kong-manager/issues/317) [#319](https://github.com/Kong/kong-manager/issues/319) [#322](https://github.com/Kong/kong-manager/issues/322) [#325](https://github.com/Kong/kong-manager/issues/325) [#329](https://github.com/Kong/kong-manager/issues/329) [#330](https://github.com/Kong/kong-manager/issues/330)
+
+
+- Unified the redirection logic in Kong Manager upon entity operations.
+  [#289](https://github.com/Kong/kong-manager/issues/289)
+
+
 ## 3.8.1
 
 ## Kong
@@ -30,26 +273,22 @@ Individual unreleased changelog entries can be located at [changelog/unreleased]
 
 - Bumped lua-kong-nginx-module from 0.11.0 to 0.11.1 to fix an issue where the upstream cert chain wasn't properly set.
   [#12752](https://github.com/Kong/kong/issues/12752)
-  [KAG-4050](https://konghq.atlassian.net/browse/KAG-4050)
 
 ##### Default
 
 - Bumped lua-resty-aws to 1.5.4 to fix a bug inside region prefix generation
   [#12846](https://github.com/Kong/kong/issues/12846)
-  [KAG-3424](https://konghq.atlassian.net/browse/KAG-3424) [FTI-5732](https://konghq.atlassian.net/browse/FTI-5732)
 
 #### Features
 ##### Plugin
 - **Prometheus**: Bumped the `KONG_LATENCY_BUCKETS` maximal bucket capacity to 6000
   [#13797](https://github.com/Kong/kong/issues/13797)
-  [FTI-5990](https://konghq.atlassian.net/browse/FTI-5990)
 
 #### Fixes
 ##### Core
 - **Vault**: Fixed an issue where updating a vault entity in a non-default workspace would not take effect.
   [#13670](https://github.com/Kong/kong/issues/13670)
-  [FTI-6152](https://konghq.atlassian.net/browse/FTI-6152)
 
 ##### Plugin
 - **ai-proxy**: Fixed an issue where AI Transformer plugins always returned a 404 error when using 'Google One' Gemini subscriptions.
@@ -64,15 +303,13 @@ Individual unreleased changelog entries can be located at [changelog/unreleased]
   [#13633](https://github.com/Kong/kong/issues/13633)
 
-- **Rate-Limiting**: Fixed an issue that caused a 500 error when using the rate-limiting plugin. When the `hide_client_headers` option is set to true and a 429 error is triggered, 
+- **Rate-Limiting**: Fixed an issue that caused a 500 error when using the rate-limiting plugin. When the `hide_client_headers` option is set to true and a 429 error is triggered,
 it should return a 429 error code instead of a 500 error code.
   [#13759](https://github.com/Kong/kong/issues/13759)
-  [KAG-5492](https://konghq.atlassian.net/browse/KAG-5492)
 
 ##### Admin API
 
 - Fixed an issue where sending `tags= ` (empty parameter) resulted in a 500 error. Now, Kong returns a 400 error, as empty explicit tags are not allowed.
   [#13813](https://github.com/Kong/kong/issues/13813)
-  [KAG-5496](https://konghq.atlassian.net/browse/KAG-5496)
 
 ## 3.8.0
@@ -84,16 +321,16 @@ it should return a 429 error code instead of a 500 error code.
 
 - Fixed an inefficiency issue in the LuaJIT hashing algorithm
   [#13240](https://github.com/Kong/kong/issues/13240)
-
+
 ##### Core
 
 - Removed unnecessary DNS client initialization
   [#13479](https://github.com/Kong/kong/issues/13479)
-
+
 - Improved latency performance when gzipping/gunzipping large data (such as CP/DP config data).
   [#13338](https://github.com/Kong/kong/issues/13338)
-
+
 #### Deprecations
@@ -101,25 +338,25 @@ it should return a 429 error code instead of a 500 error code.
 
 - Debian 10, CentOS 7, and RHEL 7 reached their End of Life (EOL) dates on June 30, 2024. As of version 3.8.0.0 onward, Kong is not building installation packages or Docker images for these operating systems. Kong is no longer providing official support for any Kong version running on these systems.
   [#13468](https://github.com/Kong/kong/issues/13468)
-
-
-
-
+
+
+
+
 #### Dependencies
 ##### Core
 
 - Bumped lua-resty-acme to 0.15.0 to support username/password auth with redis.
   [#12909](https://github.com/Kong/kong/issues/12909)
-
+
 - Bumped lua-resty-aws to 1.5.3 to fix a bug related to STS regional endpoint.
   [#12846](https://github.com/Kong/kong/issues/12846)
-
+
 - Bumped lua-resty-healthcheck from 3.0.1 to 3.1.0 to fix an issue that was causing high memory usage
   [#13038](https://github.com/Kong/kong/issues/13038)
-
+
 - Bumped lua-resty-lmdb to 1.4.3 to get fixes from the upstream (lmdb 0.9.33), which resolved numerous race conditions and fixed a cursor issue.
   [#12786](https://github.com/Kong/kong/issues/12786)
@@ -131,18 +368,18 @@ it should return a 429 error code instead of a 500 error code.
 
 - Bumped OpenResty to 1.25.3.2 to improve the performance of the LuaJIT hash computation.
   [#12327](https://github.com/Kong/kong/issues/12327)
-
+
 - Bumped PCRE2 to 10.44 to fix some bugs and tidy-up the release (nothing important)
   [#12366](https://github.com/Kong/kong/issues/12366)
-
-
-
+
+
+
 - Introduced a yieldable JSON library `lua-resty-simdjson`,
 which would improve the latency significantly.
   [#13421](https://github.com/Kong/kong/issues/13421)
-
+
 ##### Default
 
 - Bumped lua-protobuf 0.5.2
@@ -151,7 +388,7 @@ which would improve the latency significantly.
 
 - Bumped LuaRocks from 3.11.0 to 3.11.1
   [#12662](https://github.com/Kong/kong/issues/12662)
-
+
 - Bumped `ngx_wasm_module` to `96b4e27e10c63b07ed40ea88a91c22f23981db35`
   [#12011](https://github.com/Kong/kong/issues/12011)
@@ -159,44 +396,44 @@ which would improve the latency significantly.
 
 - Bumped `Wasmtime` version to `23.0.2`
   [#13567](https://github.com/Kong/kong/pull/13567)
-
+
 - Made the RPM package relocatable with the default prefix set to `/`.
   [#13468](https://github.com/Kong/kong/issues/13468)
-
+
 #### Features
 ##### Configuration
 
 - Configure the Wasmtime module cache when Wasm is enabled
   [#12930](https://github.com/Kong/kong/issues/12930)
-
+
 ##### Core
 
 - **prometheus**: Added `ai_requests_total`, `ai_cost_total` and `ai_tokens_total` metrics in the Prometheus plugin to start counting AI usage.
   [#13148](https://github.com/Kong/kong/issues/13148)
-
+
 - Added a new configuration option `concurrency_limit` (integer, defaults to 1) for Queue to specify the number of delivery timers (a config sketch follows below). Note that setting `concurrency_limit` to `-1` means no limit at all, and each HTTP log entry would create an individual timer for sending.
   [#13332](https://github.com/Kong/kong/issues/13332)
-
+
 - Appended gateway info to the upstream `Via` header like `1.1 kong/3.8.0`, and optionally to the response `Via` header if it is present in the `headers` config of "kong.conf", like `2 kong/3.8.0`, according to `RFC7230` and `RFC9110`.
   [#12733](https://github.com/Kong/kong/issues/12733)
-
+
 - Starting from this version, a new DNS client library has been implemented and added into Kong, which is disabled by default. The new DNS client library has the following changes:
   - Introduced global caching for DNS records across workers, significantly reducing the query load on DNS servers.
   - Introduced observable statistics for the new DNS client, and a new Status API `/status/dns` to retrieve them.
   - Simplified the logic and made it more standardized.
   [#12305](https://github.com/Kong/kong/issues/12305)
-
+
 ##### PDK
 
 - Added `0` to support unlimited body size. When parameter `max_allowed_file_size` is `0`, `get_raw_body` will return the entire body, but the size of this body will still be limited by Nginx's `client_max_body_size`.
   [#13431](https://github.com/Kong/kong/issues/13431)
-
+
 - Extended `kong.request.get_body` and `kong.request.get_raw_body` to read from a buffered file.
   [#13158](https://github.com/Kong/kong/issues/13158)
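A sketch of the `concurrency_limit` queue option mentioned above, written as a plugin config in the style of this repo's integration specs (`bp` is the spec helpers' database blueprint; the endpoint value is illustrative):

```lua
-- http-log with two delivery timers (assumes the queue options of plugins
-- that use Kong's queue library, such as http-log)
bp.plugins:insert {
  name = "http-log",
  config = {
    http_endpoint = "http://127.0.0.1:9999/logs",  -- illustrative endpoint
    queue = {
      -- two timers deliver entries concurrently;
      -- -1 would mean no limit: one timer per log entry
      concurrency_limit = 2,
    },
  },
}
```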
@@ -204,19 +441,19 @@
 
 - Added a new PDK module `kong.telemetry` and function `kong.telemetry.log`
 to generate log entries to be reported via the OpenTelemetry plugin.
   [#13329](https://github.com/Kong/kong/issues/13329)
-
+
 ##### Plugin
 
 - **acl:** Added a new config `always_use_authenticated_groups` to support using authenticated groups even when an authenticated consumer already exists (a config sketch follows below).
   [#13184](https://github.com/Kong/kong/issues/13184)
-
+
 - AI plugins: retrieved latency data and pushed it to logs and metrics.
   [#13428](https://github.com/Kong/kong/issues/13428)
 
 - Allowed AI plugins to read requests from a buffered file.
   [#13158](https://github.com/Kong/kong/pull/13158)
-
+
 - **AI-proxy-plugin**: Added an `allow_override` option to allow overriding the upstream model auth parameter or header from the caller's request.
   [#13158](https://github.com/Kong/kong/issues/13158)
@@ -226,12 +463,12 @@ to generate log entries to be reported via the OpenTelemetry plugin.
   [#13582](https://github.com/Kong/kong/issues/13582)
 
-- Kong AI Gateway (AI Proxy and associated plugin family) now supports 
+- Kong AI Gateway (AI Proxy and associated plugin family) now supports
 all AWS Bedrock "Converse API" models.
   [#12948](https://github.com/Kong/kong/issues/12948)
 
-- Kong AI Gateway (AI Proxy and associated plugin family) now supports 
+- Kong AI Gateway (AI Proxy and associated plugin family) now supports
 the Google Gemini "chat" (generateContent) interface.
   [#12948](https://github.com/Kong/kong/issues/12948)
 
@@ -247,186 +484,186 @@ the Google Gemini "chat" (generateContent) interface.
 
 - **AWS-Lambda**: Added support for a configurable STS endpoint with the new configuration field `aws_sts_endpoint_url`.
   [#13388](https://github.com/Kong/kong/issues/13388)
-
+
 - **AWS-Lambda**: A new configuration field `empty_arrays_mode` has been added to control whether Kong should send `[]` empty arrays (returned by the Lambda function) as `[]` empty arrays or `{}` empty objects in JSON responses.
   [#13084](https://github.com/Kong/kong/issues/13084)
-
-
-
+
+
+
 - Added support for `json_body` rename in the response-transformer plugin.
   [#13131](https://github.com/Kong/kong/issues/13131)
-
+
 - **OpenTelemetry:** Added support for OpenTelemetry formatted logs.
   [#13291](https://github.com/Kong/kong/issues/13291)
-
+
 - **standard-webhooks**: Added the Standard Webhooks plugin.
   [#12757](https://github.com/Kong/kong/issues/12757)
 
 - **Request-Transformer**: Fixed an issue where renamed query parameters, url-encoded body parameters, and JSON body parameters were not handled properly when the target name is the same as the source name in the request.
   [#13358](https://github.com/Kong/kong/issues/13358)
-
+
 ##### Admin API
 
 - Added support for bracket syntax for map field configuration via the Admin API.
   [#13313](https://github.com/Kong/kong/issues/13313)
-
+
 #### Fixes
 ##### CLI Command
 
 - Fixed an issue where some debug-level error logs were not being displayed by the CLI.
   [#13143](https://github.com/Kong/kong/issues/13143)
-
+
 ##### Configuration
 
 - Re-enabled the Lua DNS resolver from proxy-wasm by default.
   [#13424](https://github.com/Kong/kong/issues/13424)
-
+
 ##### Core
 
 - Fixed an issue where luarocks-admin was not available in /usr/local/bin.
   [#13372](https://github.com/Kong/kong/issues/13372)
-
+
 - Fixed an issue where 'read' was not always passed to Postgres read-only database operations.
   [#13530](https://github.com/Kong/kong/issues/13530)
-
+
 - Deprecated shorthand fields don't take precedence over replacement fields when both are specified.
   [#13486](https://github.com/Kong/kong/issues/13486)
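A sketch of the new acl option in the same spec style (the group name is illustrative, and the default value is an assumption):

```lua
-- with always_use_authenticated_groups = true, acl keeps using groups from
-- the authenticated credential even when an authenticated consumer exists
bp.plugins:insert {
  name = "acl",
  config = {
    allow = { "admin-group" },               -- illustrative group
    always_use_authenticated_groups = true,  -- presumably defaults to false
  },
}
```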
-
+
 - Fixed an issue where the `lua-nginx-module` context was cleared when `ngx.send_header()` triggered `filter_finalize` [openresty/lua-nginx-module#2323](https://github.com/openresty/lua-nginx-module/pull/2323).
   [#13316](https://github.com/Kong/kong/issues/13316)
-
+
 - Changed the way deprecated shorthand fields are used with new fields. If the new field contains null, the deprecated field is allowed to overwrite it if both are present in the request.
   [#13592](https://github.com/Kong/kong/issues/13592)
-
+
 - Fixed an issue where an unnecessary uninitialized-variable error was logged when 400 bad requests were received.
   [#13201](https://github.com/Kong/kong/issues/13201)
-
+
 - Fixed an issue where URI captures were unavailable when the first capture group was absent.
   [#13024](https://github.com/Kong/kong/issues/13024)
-
+
 - Fixed an issue where the priority field could be set on a traditional-mode route when `router_flavor` was configured as `expressions`.
   [#13142](https://github.com/Kong/kong/issues/13142)
-
+
 - Fixed an issue where setting `tls_verify` to `false` didn't override the global-level `proxy_ssl_verify`.
   [#13470](https://github.com/Kong/kong/issues/13470)
-
+
 - Fixed an issue where the SNI cache wasn't invalidated when an SNI was updated.
   [#13165](https://github.com/Kong/kong/issues/13165)
-
+
 - The kong.logrotate configuration file will no longer be overwritten during upgrade. When upgrading, set the environment variable `DEBIAN_FRONTEND=noninteractive` on Debian/Ubuntu to avoid any interactive prompts and enable fully automatic upgrades.
   [#13348](https://github.com/Kong/kong/issues/13348)
-
+
 - Fixed an issue where the Vault secret cache got refreshed during `resurrect_ttl` time and could not be fetched by other workers.
   [#13561](https://github.com/Kong/kong/issues/13561)
-
+
 - Error logs during Vault secret rotation are now logged at the `notice` level instead of `warn`.
   [#13540](https://github.com/Kong/kong/issues/13540)
-
+
 - Fixed a bug where the `host_header` attribute of the upstream entity could not be set correctly as the Host header in requests to the upstream when retries happen.
   [#13135](https://github.com/Kong/kong/issues/13135)
-
+
 - Moved internal Unix sockets to a subdirectory (`sockets`) of the Kong prefix.
   [#13409](https://github.com/Kong/kong/issues/13409)
-
+
 - Changed the behaviour of shorthand fields that are used to describe deprecated fields. If both fields are sent in the request and their values mismatch, the request will be rejected.
   [#13594](https://github.com/Kong/kong/issues/13594)
-
+
 - Reverted the DNS client to the original behaviour of ignoring the ADDITIONAL SECTION in DNS responses.
   [#13278](https://github.com/Kong/kong/issues/13278)
-
+
 - Shortened the names of internal Unix sockets to avoid exceeding the socket name limit.
   [#13571](https://github.com/Kong/kong/issues/13571)
-
+
 ##### PDK
 
 - **PDK**: Fixed a bug where the log serializer would log `upstream_status` as nil in requests that contain a subrequest.
   [#12953](https://github.com/Kong/kong/issues/12953)
-
+
 - **Vault**: A reference ending with a slash, when parsed, should not return a key.
   [#13538](https://github.com/Kong/kong/issues/13538)
-
+
 - Fixed an issue where pdk.log.serialize() would throw an error when a JSON entity set by serialize_value contained json.null.
   [#13376](https://github.com/Kong/kong/issues/13376)
-
+
 ##### Plugin
 
-- **AI-proxy-plugin**: Fixed a bug where certain Azure models would return partial tokens/words 
+- **AI-proxy-plugin**: Fixed a bug where certain Azure models would return partial tokens/words
 when in response-streaming mode.
   [#13000](https://github.com/Kong/kong/issues/13000)
-
-- **AI-Transformer-Plugins**: Fixed a bug where cloud identity authentication 
+
+- **AI-Transformer-Plugins**: Fixed a bug where cloud identity authentication
 was not used in `ai-request-transformer` and `ai-response-transformer` plugins.
   [#13487](https://github.com/Kong/kong/issues/13487)
 
-- **AI-proxy-plugin**: Fixed a bug where Cohere and Anthropic providers don't read the `model` parameter properly 
+- **AI-proxy-plugin**: Fixed a bug where Cohere and Anthropic providers don't read the `model` parameter properly
 from the caller's request body.
   [#13000](https://github.com/Kong/kong/issues/13000)
-
-- **AI-proxy-plugin**: Fixed a bug where using "OpenAI Function" inference requests would log a 
+
+- **AI-proxy-plugin**: Fixed a bug where using "OpenAI Function" inference requests would log a
 request error, and then hang until timeout.
   [#13000](https://github.com/Kong/kong/issues/13000)
-
-- **AI-proxy-plugin**: Fixed a bug where AI Proxy would still allow callers to specify their own model, 
+
+- **AI-proxy-plugin**: Fixed a bug where AI Proxy would still allow callers to specify their own model,
 ignoring the plugin-configured model name.
   [#13000](https://github.com/Kong/kong/issues/13000)
-
-- **AI-proxy-plugin**: Fixed a bug where AI Proxy would not take precedence of the 
+
+- **AI-proxy-plugin**: Fixed a bug where AI Proxy would not take precedence of the
 plugin's configured model tuning options, over those in the user's LLM request.
   [#13000](https://github.com/Kong/kong/issues/13000)
-
-- **AI-proxy-plugin**: Fixed a bug where setting OpenAI SDK model parameter "null" caused analytics 
+
+- **AI-proxy-plugin**: Fixed a bug where setting OpenAI SDK model parameter "null" caused analytics
 to not be written to the logging plugin(s).
   [#13000](https://github.com/Kong/kong/issues/13000)
-
+
 - **ACME**: Fixed an issue of the DP reporting that deprecated config fields are used when configuration from the CP is pushed.
   [#13069](https://github.com/Kong/kong/issues/13069)
-
+
 - **ACME**: Fixed an issue where username and password were not accepted as valid authentication methods.
   [#13496](https://github.com/Kong/kong/issues/13496)
-
+
 - **AI-Proxy**: Fixed an issue where the response was gzipped even if the client didn't accept it.
   [#13155](https://github.com/Kong/kong/issues/13155)
 
 - **Prometheus**: Fixed an issue where the CP/DP compatibility check was missing for the new configuration field `ai_metrics`.
   [#13417](https://github.com/Kong/kong/issues/13417)
-
+
 - Fixed an issue where certain AI plugins could not be applied per consumer or per service.
   [#13209](https://github.com/Kong/kong/issues/13209)
@@ -442,15 +679,15 @@ to not be written to the logging plugin(s).
 
 - **AWS-Lambda**: Fixed an issue where the `version` field was not set in the request payload when `awsgateway_compatible` was enabled.
   [#13018](https://github.com/Kong/kong/issues/13018)
-
+
 - **correlation-id**: Fixed an issue where the plugin would not work if we explicitly set the `generator` to `null`.
   [#13439](https://github.com/Kong/kong/issues/13439)
-
+
 - **CORS**: Fixed an issue where the `Access-Control-Allow-Origin` header was not sent when `conf.origins` has multiple entries but includes `*`.
   [#13334](https://github.com/Kong/kong/issues/13334)
-
+
 - **grpc-gateway**: When there is a JSON decoding error, respond with status 400 and error information in the body instead of status 500.
   [#12971](https://github.com/Kong/kong/issues/12971)
@@ -461,43 +698,43 @@ to not be written to the logging plugin(s).
 
 - **AI Plugins**: Fixed an issue where multi-modal inputs were not properly validated and calculated.
   [#13445](https://github.com/Kong/kong/issues/13445)
-
+
 - **OpenTelemetry:** Fixed an issue where migration failed when upgrading from a version below 3.3 to 3.7.
   [#13391](https://github.com/Kong/kong/issues/13391)
-
+
 - **OpenTelemetry / Zipkin**: Removed redundant deprecation warnings.
   [#13220](https://github.com/Kong/kong/issues/13220)
-
+
 - **Basic-Auth**: Fixed an issue where the realm field was not recognized for older Kong versions (before 3.6).
   [#13042](https://github.com/Kong/kong/issues/13042)
-
+
 - **Key-Auth**: Fixed an issue where the realm field was not recognized for older Kong versions (before 3.7).
   [#13042](https://github.com/Kong/kong/issues/13042)
-
+
 - **Request Size Limiting**: Fixed an issue where the body size didn't get checked when the request body was buffered to a temporary file.
   [#13303](https://github.com/Kong/kong/issues/13303)
-
+
 - **Response-RateLimiting**: Fixed an issue of the DP reporting that deprecated config fields are used when configuration from the CP is pushed.
   [#13069](https://github.com/Kong/kong/issues/13069)
-
+
 - **Rate-Limiting**: Fixed an issue of the DP reporting that deprecated config fields are used when configuration from the CP is pushed.
   [#13069](https://github.com/Kong/kong/issues/13069)
-
+
 - **OpenTelemetry:** Improved the accuracy of sampling decisions.
   [#13275](https://github.com/Kong/kong/issues/13275)
-
+
 - **hmac-auth**: Added WWW-Authenticate headers to 401 responses.
   [#11791](https://github.com/Kong/kong/issues/11791)
-
+
 - **Prometheus**: Improved error logging when there is an inconsistent label count.
   [#13020](https://github.com/Kong/kong/issues/13020)
@@ -505,15 +742,15 @@ to not be written to the logging plugin(s).
 
 - **jwt**: Added WWW-Authenticate headers to 401 responses.
   [#11792](https://github.com/Kong/kong/issues/11792)
-
+
 - **ldap-auth**: Added WWW-Authenticate headers to all 401 responses.
   [#11820](https://github.com/Kong/kong/issues/11820)
-
+
 - **OAuth2**: Added WWW-Authenticate headers to all 401 responses and the realm option.
   [#11833](https://github.com/Kong/kong/issues/11833)
-
+
 - **proxy-cache**: Fixed an issue where the Age header was not being updated correctly when serving cached responses.
   [#13387](https://github.com/Kong/kong/issues/13387)
@@ -531,7 +768,7 @@ to not be written to the logging plugin(s).
 
 - Fixed an issue where hybrid mode did not work if the forward proxy password contained the special character `#`. Note that the `proxy_server` configuration parameter still needs to be URL-encoded.
   [#13457](https://github.com/Kong/kong/issues/13457)
-
+
 ##### Default
 
 - **AI-proxy**: A configuration validation is added to prevent from enabling `log_statistics` upon
diff --git a/changelog/3.9.0/3.9.0.md b/changelog/3.9.0/3.9.0.md
new file mode 100644
index 000000000000..238e17172072
--- /dev/null
+++ b/changelog/3.9.0/3.9.0.md
@@ -0,0 +1,275 @@
+## Kong
+
+
+
+
+### Deprecations
+#### Core
+
+- `node_id` in configuration has been deprecated.
+  [#13687](https://github.com/Kong/kong/issues/13687)
+  [FTI-6221](https://konghq.atlassian.net/browse/FTI-6221)
+
+### Dependencies
+#### Core
+
+- Bumped lua-kong-nginx-module from 0.11.0 to 0.11.1 to fix an issue where the upstream cert chain wasn't properly set.
+  [#12752](https://github.com/Kong/kong/issues/12752)
+  [KAG-4050](https://konghq.atlassian.net/browse/KAG-4050)
+
+- Bumped lua-resty-events to 0.3.1. Optimized the memory usage.
+  [#13097](https://github.com/Kong/kong/issues/13097)
+  [KAG-4480](https://konghq.atlassian.net/browse/KAG-4480) [KAG-4586](https://konghq.atlassian.net/browse/KAG-4586)
+
+- Bumped lua-resty-lmdb to 1.6.0, allowing `page_size` to be 1.
+  [#13908](https://github.com/Kong/kong/issues/13908)
+  [KAG-5875](https://konghq.atlassian.net/browse/KAG-5875)
+
+- Bumped lua-resty-lmdb to 1.5.0. Added a `page_size` parameter to allow overriding the page size from the caller side.
+  [#12786](https://github.com/Kong/kong/issues/12786)
+
+#### Default
+
+- Kong Gateway now supports Ubuntu 24.04 (Noble Numbat) with both open-source and Enterprise packages.
+  [#13626](https://github.com/Kong/kong/issues/13626)
+  [KAG-4672](https://konghq.atlassian.net/browse/KAG-4672)
+
+- Bumped the rpm Dockerfile default base image from UBI 8 to UBI 9
+  [#13574](https://github.com/Kong/kong/issues/13574)
+
+- Bumped lua-resty-aws to 1.5.4 to fix a bug inside region prefix generation.
+  [#12846](https://github.com/Kong/kong/issues/12846)
+  [KAG-3424](https://konghq.atlassian.net/browse/KAG-3424) [FTI-5732](https://konghq.atlassian.net/browse/FTI-5732)
+
+- Bumped lua-resty-ljsonschema to 1.2.0, adding support for `null` as a valid option in `enum` types and proper calculation of UTF-8 string length instead of byte count
+  [#13783](https://github.com/Kong/kong/issues/13783)
+  [FTI-5870](https://konghq.atlassian.net/browse/FTI-5870) [FTI-6171](https://konghq.atlassian.net/browse/FTI-6171)
+
+- Bumped `ngx_wasm_module` to `9136e463a6f1d80755ce66c88c3ddecd0eb5e25d`
+  [#12011](https://github.com/Kong/kong/issues/12011)
+
+
+- Bumped `Wasmtime` version to `26.0.0`
+  [#12011](https://github.com/Kong/kong/issues/12011)
+
+- Bumped OpenSSL to 3.2.3 to fix unbounded memory growth with session handling in TLSv1.3 and other CVEs.
+  [#13448](https://github.com/Kong/kong/issues/13448)
+  [KAG-5075](https://konghq.atlassian.net/browse/KAG-5075)
+
+- **Wasm**: Removed the experimental datakit Wasm filter
+  [#14012](https://github.com/Kong/kong/issues/14012)
+  [KAG-6021](https://konghq.atlassian.net/browse/KAG-6021)
+
+### Features
+#### CLI Command
+- Added the `kong drain` CLI command to make the `/status/ready` endpoint return a `503 Service Unavailable` response.
+  [#13838](https://github.com/Kong/kong/issues/13838)
+  [FTI-6276](https://konghq.atlassian.net/browse/FTI-6276)
+#### Core
+
+- Added a new feature for Kong Manager that supports multiple domains, enabling dynamic cross-origin access for Admin API requests.
+  [#13664](https://github.com/Kong/kong/issues/13664)
+
+- Added an ADA dependency: a WHATWG-compliant and fast URL parser.
+  [#13120](https://github.com/Kong/kong/issues/13120)
+  [KAG-5106](https://konghq.atlassian.net/browse/KAG-5106)
+
+- Added a new LLM driver for interfacing with the Hugging Face inference API.
+The driver supports both serverless and dedicated LLM instances hosted by
+Hugging Face for conversational and text generation tasks.
+  [#13484](https://github.com/Kong/kong/issues/13484)
+
+
+- Increased the priority order of the correlation-id plugin from 1 to 100001 so that the plugin can be used
+with other plugins, especially custom auth plugins.
+  [#13581](https://github.com/Kong/kong/issues/13581)
+
+- Added a `tls.disable_http2_alpn()` function patch for disabling HTTP/2 ALPN when performing a TLS handshake.
+  [#13709](https://github.com/Kong/kong/issues/13709)
+
+
+- Improved the output of the request debugger:
+  - The resolution of field `total_time` is now in microseconds.
+  - A new field, `total_time_without_upstream`, shows the latency only introduced by Kong.
+  [#13460](https://github.com/Kong/kong/issues/13460)
+  [KAG-4733](https://konghq.atlassian.net/browse/KAG-4733) [FTI-5989](https://konghq.atlassian.net/browse/FTI-5989)
+- **proxy-wasm**: Added support for Wasm filters to be configured via the `/plugins` Admin API.
+  [#13843](https://github.com/Kong/kong/issues/13843)
+  [KAG-5616](https://konghq.atlassian.net/browse/KAG-5616)
+#### PDK
+
+- Added `kong.service.request.clear_query_arg(name)` to the PDK.
+  [#13619](https://github.com/Kong/kong/issues/13619)
+  [KAG-5238](https://konghq.atlassian.net/browse/KAG-5238)
+
+- Array and Map type span attributes are now supported by the tracing PDK.
+  [#13818](https://github.com/Kong/kong/issues/13818)
+  [KAG-5162](https://konghq.atlassian.net/browse/KAG-5162)
+#### Plugin
+- **Prometheus**: Increased the upper limit of `KONG_LATENCY_BUCKETS` to 6000 to enhance latency tracking precision.
+  [#13588](https://github.com/Kong/kong/issues/13588)
+  [FTI-5990](https://konghq.atlassian.net/browse/FTI-5990)
+
+- **ai-proxy**: Disabled the HTTP/2 ALPN handshake for connections on routes configured with AI Proxy.
+  [#13735](https://github.com/Kong/kong/issues/13735)
+
+- **Redirect**: Added a new plugin to redirect requests to another location.
+  [#13900](https://github.com/Kong/kong/issues/13900)
+
+
+- **Prometheus**: Added support for Proxy-Wasm metrics.
+  [#13681](https://github.com/Kong/kong/issues/13681)
+
+#### Admin API
+- **Admin API**: Added support for the official YAML media-type (`application/yaml`) to the `/config` endpoint (see the sketch below).
+  [#13713](https://github.com/Kong/kong/issues/13713)
+  [KAG-5474](https://konghq.atlassian.net/browse/KAG-5474)
+#### Clustering
+
+- Added a remote procedure call (RPC) framework for Hybrid mode deployments.
+  [#12320](https://github.com/Kong/kong/issues/12320)
+  [KAG-623](https://konghq.atlassian.net/browse/KAG-623) [KAG-3751](https://konghq.atlassian.net/browse/KAG-3751)
+
+### Fixes
+#### Core
+
+- Fixed an issue where the `ngx.balancer.recreate_request` API did not refresh the body buffer when `ngx.req.set_body_data` was used in the balancer phase.
+  [#13882](https://github.com/Kong/kong/issues/13882)
+  [KAG-5821](https://konghq.atlassian.net/browse/KAG-5821)
+
+- Fixed `log_init_worker_errors` so that `ngx.ctx` is always passed to it; otherwise it could crash at runtime.
+  [#13731](https://github.com/Kong/kong/issues/13731)
+
+- Fixed an issue where the workspace ID was not included in the plugin config in the plugins iterator.
+  [#13377](https://github.com/Kong/kong/issues/13377)
+
+- Fixed an issue where the workspace ID was not included in the plugin config in the plugins iterator.
+  [#13872](https://github.com/Kong/kong/issues/13872)
+  [FTI-6200](https://konghq.atlassian.net/browse/FTI-6200)
+
+- Fixed a 500 error triggered by unhandled nil fields during schema validation.
+  [#13861](https://github.com/Kong/kong/issues/13861)
+  [FTI-6336](https://konghq.atlassian.net/browse/FTI-6336)
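A sketch of exercising the new `/config` media-type from Lua with lua-resty-http (the address, port, and YAML payload are illustrative assumptions; assumes a DB-less node with the Admin API on the default port):

```lua
local http = require("resty.http")

local client = http.new()
local res, err = client:request_uri("http://127.0.0.1:8001/config", {
  method = "POST",
  headers = { ["Content-Type"] = "application/yaml" },  -- newly supported media-type
  body = [[
_format_version: "3.0"
services:
  - name: example
    url: http://localhost:1234
]],
})
assert(res, err)
-- a 201 response indicates the configuration was accepted
```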
+
+- **Vault**: Fixed an issue where array-like configuration fields couldn't contain vault references.
+  [#13953](https://github.com/Kong/kong/issues/13953)
+  [FTI-6163](https://konghq.atlassian.net/browse/FTI-6163)
+
+- **Vault**: Fixed an issue where updating a vault entity in a non-default workspace wouldn't take effect.
+  [#13610](https://github.com/Kong/kong/issues/13610)
+  [FTI-6152](https://konghq.atlassian.net/browse/FTI-6152)
+
+- **Vault**: Fixed an issue where a vault reference in the Kong configuration couldn't be dereferenced when both the http and stream subsystems were enabled.
+  [#13953](https://github.com/Kong/kong/issues/13953)
+  [FTI-6163](https://konghq.atlassian.net/browse/FTI-6163)
+
+- **proxy-wasm:** Added a check that prevents Kong from starting when the
+database contains invalid Wasm filters.
+  [#13764](https://github.com/Kong/kong/issues/13764)
+  [KAG-2636](https://konghq.atlassian.net/browse/KAG-2636)
+
+- Fixed an issue where `kong.request.enable_buffering` couldn't be used when the downstream used HTTP/2.
+  [#13614](https://github.com/Kong/kong/issues/13614)
+  [FTI-5725](https://konghq.atlassian.net/browse/FTI-5725)
+#### PDK
+
+- Aligned the `kong.log.inspect` function to log at the `notice` level, as documented.
+  [#13642](https://github.com/Kong/kong/issues/13642)
+  [FTI-6215](https://konghq.atlassian.net/browse/FTI-6215)
+
+- Fixed the error message for an invalid `retries` variable.
+  [#13605](https://github.com/Kong/kong/issues/13605)
+
+#### Plugin
+
+- **ai-proxy**: Fixed a bug where tools (function) calls to Anthropic would return empty results.
+  [#13760](https://github.com/Kong/kong/issues/13760)
+
+
+- **ai-proxy**: Fixed a bug where tools (function) calls to Bedrock would return empty results.
+  [#13760](https://github.com/Kong/kong/issues/13760)
+
+
+- **ai-proxy**: Fixed a bug where the Bedrock Guardrail config was ignored.
+  [#13760](https://github.com/Kong/kong/issues/13760)
+
+
+- **ai-proxy**: Fixed a bug where tools (function) calls to Cohere would return empty results.
+  [#13760](https://github.com/Kong/kong/issues/13760)
+
+
+- **ai-proxy**: Fixed a bug where the Gemini provider would return an error if content safety failed in AI Proxy.
+  [#13760](https://github.com/Kong/kong/issues/13760)
+
+
+- **ai-proxy**: Fixed a bug where tools (function) calls to Gemini (or via Vertex) would return empty results.
+  [#13760](https://github.com/Kong/kong/issues/13760)
+
+
+- **ai-proxy**: Fixed an issue where AI Transformer plugins always returned a 404 error when using 'Google One' Gemini subscriptions.
+  [#13703](https://github.com/Kong/kong/issues/13703)
+
+
+- **ai-transformers**: Fixed a bug where the correct LLM error message was not propagated to the caller.
+  [#13703](https://github.com/Kong/kong/issues/13703)
+
+- **AI-Proxy**: Fixed an issue where multi-modal requests were blocked on the Azure AI provider.
+  [#13702](https://github.com/Kong/kong/issues/13702)
+
+
+- Fixed a bug where AI Semantic Cache couldn't use request-provided models.
+  [#13627](https://github.com/Kong/kong/issues/13627)
+
+- **AWS-Lambda**: Fixed an issue in proxy integration mode that caused an internal server error when `multiValueHeaders` was null.
+  [#13533](https://github.com/Kong/kong/issues/13533)
+  [FTI-6168](https://konghq.atlassian.net/browse/FTI-6168)
+
+- **jwt**: Ensured `rsa_public_key` isn't base64-decoded.
+  [#13717](https://github.com/Kong/kong/issues/13717)
+
+- **key-auth**: Fixed an issue with the order of query arguments, ensuring that arguments retain their order when hiding the credentials.
+  [#13619](https://github.com/Kong/kong/issues/13619)
+  [KAG-5238](https://konghq.atlassian.net/browse/KAG-5238)
+
+- **rate-limiting**: Fixed a bug where the returned values from `get_redis_connection()` were incorrect.
+  [#13613](https://github.com/Kong/kong/issues/13613)
+
+- **rate-limiting**: Fixed an issue that caused an HTTP 500 error when `hide_client_headers` was set to `true` and the request exceeded the rate limit.
+  [#13722](https://github.com/Kong/kong/issues/13722)
+  [KAG-5492](https://konghq.atlassian.net/browse/KAG-5492)
+#### Admin API
+
+- Fixed querying Admin API entities with empty tags.
+  [#13723](https://github.com/Kong/kong/issues/13723)
+  [KAG-5496](https://konghq.atlassian.net/browse/KAG-5496)
+
+- Fixed an issue where nested parameters couldn't be parsed correctly when using `form-urlencoded` requests.
+  [#13668](https://github.com/Kong/kong/issues/13668)
+  [FTI-6165](https://konghq.atlassian.net/browse/FTI-6165)
+#### Clustering
+
+- **Clustering**: Adjusted error log levels for control plane connections.
+  [#13863](https://github.com/Kong/kong/issues/13863)
+  [FTI-6238](https://konghq.atlassian.net/browse/FTI-6238)
+#### Default
+
+- **Loggly**: Fixed an issue where a missing `/bin/hostname` caused an error warning on startup.
+  [#13788](https://github.com/Kong/kong/issues/13788)
+  [FTI-6046](https://konghq.atlassian.net/browse/FTI-6046)
+
+## Kong-Manager
+
+### Fixes
+#### Default
+
+- Kong Manager will now hide the scope change field when creating/editing a scoped plugin from another entity.
+  [#297](https://github.com/Kong/kong-manager/issues/297)
+
+
+- Improved the user experience in Kong Manager by fixing various UI-related issues.
+  [#277](https://github.com/Kong/kong-manager/issues/277) [#283](https://github.com/Kong/kong-manager/issues/283) [#286](https://github.com/Kong/kong-manager/issues/286) [#287](https://github.com/Kong/kong-manager/issues/287) [#288](https://github.com/Kong/kong-manager/issues/288) [#291](https://github.com/Kong/kong-manager/issues/291) [#293](https://github.com/Kong/kong-manager/issues/293) [#295](https://github.com/Kong/kong-manager/issues/295) [#298](https://github.com/Kong/kong-manager/issues/298) [#302](https://github.com/Kong/kong-manager/issues/302) [#304](https://github.com/Kong/kong-manager/issues/304) [#306](https://github.com/Kong/kong-manager/issues/306) [#309](https://github.com/Kong/kong-manager/issues/309) [#317](https://github.com/Kong/kong-manager/issues/317) [#319](https://github.com/Kong/kong-manager/issues/319) [#322](https://github.com/Kong/kong-manager/issues/322) [#325](https://github.com/Kong/kong-manager/issues/325) [#329](https://github.com/Kong/kong-manager/issues/329) [#330](https://github.com/Kong/kong-manager/issues/330)
+
+
+- Unified the redirection logic in Kong Manager upon entity operations.
+  [#289](https://github.com/Kong/kong-manager/issues/289)
+
diff --git a/changelog/3.9.0/kong-manager/.gitkeep b/changelog/3.9.0/kong-manager/.gitkeep
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/changelog/3.9.0/kong-manager/hide-plugin-scoping.yml b/changelog/3.9.0/kong-manager/hide-plugin-scoping.yml
new file mode 100644
index 000000000000..7ebd1dfb83b0
--- /dev/null
+++ b/changelog/3.9.0/kong-manager/hide-plugin-scoping.yml
@@ -0,0 +1,3 @@
+message: Kong Manager will now hide the scope change field when creating/editing a scoped plugin from another entity.
+type: bugfix
+githubs: [297]
diff --git a/changelog/3.9.0/kong-manager/ui-improvements.yml b/changelog/3.9.0/kong-manager/ui-improvements.yml
new file mode 100644
index 000000000000..471f74dddb5f
--- /dev/null
+++ b/changelog/3.9.0/kong-manager/ui-improvements.yml
@@ -0,0 +1,22 @@
+message: Improved the user experience in Kong Manager by fixing various UI-related issues.
+type: bugfix
+githubs:
+  - 277
+  - 283
+  - 286
+  - 287
+  - 288
+  - 291
+  - 293
+  - 295
+  - 298
+  - 302
+  - 304
+  - 306
+  - 309
+  - 317
+  - 319
+  - 322
+  - 325
+  - 329
+  - 330
diff --git a/changelog/3.9.0/kong-manager/unified-redirection.yml b/changelog/3.9.0/kong-manager/unified-redirection.yml
new file mode 100644
index 000000000000..228a5f1cd234
--- /dev/null
+++ b/changelog/3.9.0/kong-manager/unified-redirection.yml
@@ -0,0 +1,3 @@
+message: Unified the redirection logic in Kong Manager upon entity operations.
+type: bugfix
+githubs: [289]
diff --git a/changelog/3.9.0/kong/.gitkeep b/changelog/3.9.0/kong/.gitkeep
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/changelog/unreleased/kong/add-noble-numbat.yml b/changelog/3.9.0/kong/add-noble-numbat.yml
similarity index 100%
rename from changelog/unreleased/kong/add-noble-numbat.yml
rename to changelog/3.9.0/kong/add-noble-numbat.yml
diff --git a/changelog/unreleased/kong/add_multiple_domain_for_gui.yml b/changelog/3.9.0/kong/add_multiple_domain_for_gui.yml
similarity index 100%
rename from changelog/unreleased/kong/add_multiple_domain_for_gui.yml
rename to changelog/3.9.0/kong/add_multiple_domain_for_gui.yml
diff --git a/changelog/unreleased/kong/ai-anthropic-fix-function-calling.yml b/changelog/3.9.0/kong/ai-anthropic-fix-function-calling.yml
similarity index 100%
rename from changelog/unreleased/kong/ai-anthropic-fix-function-calling.yml
rename to changelog/3.9.0/kong/ai-anthropic-fix-function-calling.yml
diff --git a/changelog/unreleased/kong/ai-bedrock-fix-function-calling.yml b/changelog/3.9.0/kong/ai-bedrock-fix-function-calling.yml
similarity index 100%
rename from changelog/unreleased/kong/ai-bedrock-fix-function-calling.yml
rename to changelog/3.9.0/kong/ai-bedrock-fix-function-calling.yml
diff --git a/changelog/unreleased/kong/ai-bedrock-fix-guardrails.yml b/changelog/3.9.0/kong/ai-bedrock-fix-guardrails.yml
similarity index 100%
rename from changelog/unreleased/kong/ai-bedrock-fix-guardrails.yml
rename to changelog/3.9.0/kong/ai-bedrock-fix-guardrails.yml
diff --git a/changelog/unreleased/kong/ai-cohere-fix-function-calling.yml b/changelog/3.9.0/kong/ai-cohere-fix-function-calling.yml
similarity index 100%
rename from changelog/unreleased/kong/ai-cohere-fix-function-calling.yml
rename to changelog/3.9.0/kong/ai-cohere-fix-function-calling.yml
diff --git a/changelog/unreleased/kong/ai-gemini-blocks-content-safety.yml b/changelog/3.9.0/kong/ai-gemini-blocks-content-safety.yml
similarity index 100%
rename from changelog/unreleased/kong/ai-gemini-blocks-content-safety.yml
rename to changelog/3.9.0/kong/ai-gemini-blocks-content-safety.yml
diff --git a/changelog/unreleased/kong/ai-gemini-fix-function-calling.yml b/changelog/3.9.0/kong/ai-gemini-fix-function-calling.yml
similarity index 100%
rename from changelog/unreleased/kong/ai-gemini-fix-function-calling.yml
rename to changelog/3.9.0/kong/ai-gemini-fix-function-calling.yml
diff --git a/changelog/unreleased/kong/ai-gemini-fix-transformer-plugins.yml b/changelog/3.9.0/kong/ai-gemini-fix-transformer-plugins.yml
similarity index 100%
rename from changelog/unreleased/kong/ai-gemini-fix-transformer-plugins.yml
rename to changelog/3.9.0/kong/ai-gemini-fix-transformer-plugins.yml
diff --git a/changelog/unreleased/kong/ai-transformers-bad-error-handling.yml b/changelog/3.9.0/kong/ai-transformers-bad-error-handling.yml
similarity index 100%
rename from changelog/unreleased/kong/ai-transformers-bad-error-handling.yml
rename to changelog/3.9.0/kong/ai-transformers-bad-error-handling.yml
diff --git a/changelog/unreleased/kong/bump-dockerfile-ubi9.yml b/changelog/3.9.0/kong/bump-dockerfile-ubi9.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-dockerfile-ubi9.yml
rename to changelog/3.9.0/kong/bump-dockerfile-ubi9.yml
diff --git a/changelog/unreleased/kong/bump-lua-kong-nginx-module.yml b/changelog/3.9.0/kong/bump-lua-kong-nginx-module.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-lua-kong-nginx-module.yml
rename to changelog/3.9.0/kong/bump-lua-kong-nginx-module.yml
diff --git a/changelog/unreleased/kong/bump-lua-resty-aws.yml b/changelog/3.9.0/kong/bump-lua-resty-aws.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-lua-resty-aws.yml
rename to changelog/3.9.0/kong/bump-lua-resty-aws.yml
diff --git a/changelog/unreleased/kong/bump-lua-resty-events.yml b/changelog/3.9.0/kong/bump-lua-resty-events.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-lua-resty-events.yml
rename to changelog/3.9.0/kong/bump-lua-resty-events.yml
diff --git a/changelog/unreleased/kong/bump-lua-resty-ljsonschema.yml b/changelog/3.9.0/kong/bump-lua-resty-ljsonschema.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-lua-resty-ljsonschema.yml
rename to changelog/3.9.0/kong/bump-lua-resty-ljsonschema.yml
diff --git a/changelog/unreleased/kong/bump-lua-resty-lmdb-2.yml b/changelog/3.9.0/kong/bump-lua-resty-lmdb-2.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-lua-resty-lmdb-2.yml
rename to changelog/3.9.0/kong/bump-lua-resty-lmdb-2.yml
diff --git a/changelog/unreleased/kong/bump-lua-resty-lmdb.yml b/changelog/3.9.0/kong/bump-lua-resty-lmdb.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-lua-resty-lmdb.yml
rename to changelog/3.9.0/kong/bump-lua-resty-lmdb.yml
diff --git a/changelog/unreleased/kong/bump-ngx-wasm-module.yml b/changelog/3.9.0/kong/bump-ngx-wasm-module.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-ngx-wasm-module.yml
rename to changelog/3.9.0/kong/bump-ngx-wasm-module.yml
diff --git a/changelog/unreleased/kong/bump-prometheus-latency-bucket.yml b/changelog/3.9.0/kong/bump-prometheus-latency-bucket.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-prometheus-latency-bucket.yml
rename to changelog/3.9.0/kong/bump-prometheus-latency-bucket.yml
diff --git a/changelog/unreleased/kong/bump-wasmtime.yml b/changelog/3.9.0/kong/bump-wasmtime.yml
similarity index 100%
rename from changelog/unreleased/kong/bump-wasmtime.yml
rename to changelog/3.9.0/kong/bump-wasmtime.yml
diff --git a/changelog/unreleased/kong/bump_openssl.yml b/changelog/3.9.0/kong/bump_openssl.yml
similarity index 100%
rename from changelog/unreleased/kong/bump_openssl.yml
rename to changelog/3.9.0/kong/bump_openssl.yml
diff --git a/changelog/unreleased/kong/chore-clustering-log-level.yml b/changelog/3.9.0/kong/chore-clustering-log-level.yml
similarity index 100%
rename from changelog/unreleased/kong/chore-clustering-log-level.yml
rename to changelog/3.9.0/kong/chore-clustering-log-level.yml
diff --git a/changelog/unreleased/kong/cp-dp-rpc.yml b/changelog/3.9.0/kong/cp-dp-rpc.yml
similarity index 100%
rename from changelog/unreleased/kong/cp-dp-rpc.yml
rename to changelog/3.9.0/kong/cp-dp-rpc.yml
diff --git a/changelog/unreleased/kong/deprecate_node_id.yml b/changelog/3.9.0/kong/deprecate_node_id.yml
similarity index 100%
rename from changelog/unreleased/kong/deprecate_node_id.yml
rename to changelog/3.9.0/kong/deprecate_node_id.yml
diff --git a/changelog/unreleased/kong/feat-add-ada.yml b/changelog/3.9.0/kong/feat-add-ada.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-add-ada.yml
rename to changelog/3.9.0/kong/feat-add-ada.yml
diff --git a/changelog/unreleased/kong/feat-add-huggingface-llm-driver.yml b/changelog/3.9.0/kong/feat-add-huggingface-llm-driver.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-add-huggingface-llm-driver.yml
rename to changelog/3.9.0/kong/feat-add-huggingface-llm-driver.yml
diff --git a/changelog/unreleased/kong/feat-ai-proxy-disable-h2-alpn.yml b/changelog/3.9.0/kong/feat-ai-proxy-disable-h2-alpn.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-ai-proxy-disable-h2-alpn.yml
rename to changelog/3.9.0/kong/feat-ai-proxy-disable-h2-alpn.yml
diff --git a/changelog/unreleased/kong/feat-api-yaml-media-type.yml b/changelog/3.9.0/kong/feat-api-yaml-media-type.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-api-yaml-media-type.yml
rename to changelog/3.9.0/kong/feat-api-yaml-media-type.yml
diff --git a/changelog/unreleased/kong/feat-correlation-id-order.yml b/changelog/3.9.0/kong/feat-correlation-id-order.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-correlation-id-order.yml
rename to changelog/3.9.0/kong/feat-correlation-id-order.yml
diff --git a/changelog/unreleased/kong/feat-disable-h2-alpn.yml b/changelog/3.9.0/kong/feat-disable-h2-alpn.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-disable-h2-alpn.yml
rename to changelog/3.9.0/kong/feat-disable-h2-alpn.yml
diff --git a/changelog/unreleased/kong/feat-kong-drain-cmd.yml b/changelog/3.9.0/kong/feat-kong-drain-cmd.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-kong-drain-cmd.yml
rename to changelog/3.9.0/kong/feat-kong-drain-cmd.yml
diff --git a/changelog/unreleased/kong/feat-pdk-clear-query-arg.yml b/changelog/3.9.0/kong/feat-pdk-clear-query-arg.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-pdk-clear-query-arg.yml
rename to changelog/3.9.0/kong/feat-pdk-clear-query-arg.yml
diff --git a/changelog/unreleased/kong/feat-request-debguger-finer-resolution-and-total-latency.yml b/changelog/3.9.0/kong/feat-request-debguger-finer-resolution-and-total-latency.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-request-debguger-finer-resolution-and-total-latency.yml
rename to changelog/3.9.0/kong/feat-request-debguger-finer-resolution-and-total-latency.yml
diff --git a/changelog/unreleased/kong/feat-tracing-pdk-attributes.yml b/changelog/3.9.0/kong/feat-tracing-pdk-attributes.yml
similarity index 100%
rename from changelog/unreleased/kong/feat-tracing-pdk-attributes.yml
rename to changelog/3.9.0/kong/feat-tracing-pdk-attributes.yml
diff --git a/changelog/unreleased/kong/fix-admin-api-for-empty-tags.yml b/changelog/3.9.0/kong/fix-admin-api-for-empty-tags.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-admin-api-for-empty-tags.yml
rename to changelog/3.9.0/kong/fix-admin-api-for-empty-tags.yml
diff --git a/changelog/unreleased/kong/fix-ai-proxy-multi-modal-azure.yml b/changelog/3.9.0/kong/fix-ai-proxy-multi-modal-azure.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-ai-proxy-multi-modal-azure.yml
rename to changelog/3.9.0/kong/fix-ai-proxy-multi-modal-azure.yml
diff --git a/changelog/unreleased/kong/fix-ai-semantic-cache-model.yml b/changelog/3.9.0/kong/fix-ai-semantic-cache-model.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-ai-semantic-cache-model.yml
rename to changelog/3.9.0/kong/fix-ai-semantic-cache-model.yml
diff --git a/changelog/unreleased/kong/fix-aws-lambda-multi-value-header-null.yml b/changelog/3.9.0/kong/fix-aws-lambda-multi-value-header-null.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-aws-lambda-multi-value-header-null.yml
rename to changelog/3.9.0/kong/fix-aws-lambda-multi-value-header-null.yml
diff --git a/changelog/unreleased/kong/fix-balancer-health-checker.yml b/changelog/3.9.0/kong/fix-balancer-health-checker.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-balancer-health-checker.yml
rename to changelog/3.9.0/kong/fix-balancer-health-checker.yml
diff --git a/changelog/unreleased/kong/fix-core-pass-ctx-to-log-init-worker-errors.yml b/changelog/3.9.0/kong/fix-core-pass-ctx-to-log-init-worker-errors.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-core-pass-ctx-to-log-init-worker-errors.yml
rename to changelog/3.9.0/kong/fix-core-pass-ctx-to-log-init-worker-errors.yml
diff --git a/changelog/unreleased/kong/fix-jwt-plugin-rsa-public-key-b64decoded.yml b/changelog/3.9.0/kong/fix-jwt-plugin-rsa-public-key-b64decoded.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-jwt-plugin-rsa-public-key-b64decoded.yml
rename to changelog/3.9.0/kong/fix-jwt-plugin-rsa-public-key-b64decoded.yml
diff --git a/changelog/unreleased/kong/fix-key-auth-retain-query-order.yml b/changelog/3.9.0/kong/fix-key-auth-retain-query-order.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-key-auth-retain-query-order.yml
rename to changelog/3.9.0/kong/fix-key-auth-retain-query-order.yml
diff --git a/changelog/unreleased/kong/fix-loggly-hostname-notfound.yml b/changelog/3.9.0/kong/fix-loggly-hostname-notfound.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-loggly-hostname-notfound.yml
rename to changelog/3.9.0/kong/fix-loggly-hostname-notfound.yml
diff --git a/changelog/unreleased/kong/fix-ngx-balancer-recreate-request-api-for-balancer-body-refresh.yml b/changelog/3.9.0/kong/fix-ngx-balancer-recreate-request-api-for-balancer-body-refresh.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-ngx-balancer-recreate-request-api-for-balancer-body-refresh.yml
rename to changelog/3.9.0/kong/fix-ngx-balancer-recreate-request-api-for-balancer-body-refresh.yml
diff --git a/changelog/unreleased/kong/fix-parse-nested-parameters.yml b/changelog/3.9.0/kong/fix-parse-nested-parameters.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-parse-nested-parameters.yml
rename to changelog/3.9.0/kong/fix-parse-nested-parameters.yml
diff --git a/changelog/unreleased/kong/fix-pdk-inspect-notice.yml b/changelog/3.9.0/kong/fix-pdk-inspect-notice.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-pdk-inspect-notice.yml
rename to changelog/3.9.0/kong/fix-pdk-inspect-notice.yml
diff --git a/changelog/unreleased/kong/fix-plugin-conf-ws-id.yml b/changelog/3.9.0/kong/fix-plugin-conf-ws-id.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-plugin-conf-ws-id.yml
rename to changelog/3.9.0/kong/fix-plugin-conf-ws-id.yml
diff --git a/changelog/unreleased/kong/fix-retries-error-message.yml b/changelog/3.9.0/kong/fix-retries-error-message.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-retries-error-message.yml
rename to changelog/3.9.0/kong/fix-retries-error-message.yml
diff --git a/changelog/unreleased/kong/fix-return-values-mistaken-in-rate-limiting-plugin.yml b/changelog/3.9.0/kong/fix-return-values-mistaken-in-rate-limiting-plugin.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-return-values-mistaken-in-rate-limiting-plugin.yml
rename to changelog/3.9.0/kong/fix-return-values-mistaken-in-rate-limiting-plugin.yml
diff --git a/changelog/unreleased/kong/fix-rl-plugin-resp-hdr.yml b/changelog/3.9.0/kong/fix-rl-plugin-resp-hdr.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-rl-plugin-resp-hdr.yml
rename to changelog/3.9.0/kong/fix-rl-plugin-resp-hdr.yml
diff --git a/changelog/unreleased/kong/fix-schema-validation-with-nil-field.yml b/changelog/3.9.0/kong/fix-schema-validation-with-nil-field.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-schema-validation-with-nil-field.yml
rename to changelog/3.9.0/kong/fix-schema-validation-with-nil-field.yml
diff --git a/changelog/unreleased/kong/fix-vault-array-config.yml b/changelog/3.9.0/kong/fix-vault-array-config.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-vault-array-config.yml
rename to changelog/3.9.0/kong/fix-vault-array-config.yml
diff --git a/changelog/unreleased/kong/fix-vault-cache-workspace-id.yml b/changelog/3.9.0/kong/fix-vault-cache-workspace-id.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-vault-cache-workspace-id.yml
rename to changelog/3.9.0/kong/fix-vault-cache-workspace-id.yml
diff --git a/changelog/unreleased/kong/fix-vault-stream-subsystem.yml b/changelog/3.9.0/kong/fix-vault-stream-subsystem.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-vault-stream-subsystem.yml
rename to changelog/3.9.0/kong/fix-vault-stream-subsystem.yml
diff --git a/changelog/unreleased/kong/fix-wasm-check-missing-filters.yml b/changelog/3.9.0/kong/fix-wasm-check-missing-filters.yml
similarity index 100%
rename from changelog/unreleased/kong/fix-wasm-check-missing-filters.yml
rename to changelog/3.9.0/kong/fix-wasm-check-missing-filters.yml
diff --git a/changelog/unreleased/kong/plugins-redirect.yml b/changelog/3.9.0/kong/plugins-redirect.yml
similarity index 100%
rename from changelog/unreleased/kong/plugins-redirect.yml
rename to changelog/3.9.0/kong/plugins-redirect.yml
diff --git a/changelog/unreleased/kong/prometheus-wasmx-metrics.yml b/changelog/3.9.0/kong/prometheus-wasmx-metrics.yml
similarity index 100%
rename from changelog/unreleased/kong/prometheus-wasmx-metrics.yml
rename to changelog/3.9.0/kong/prometheus-wasmx-metrics.yml
diff --git a/changelog/3.9.0/kong/remove-datakit.yml b/changelog/3.9.0/kong/remove-datakit.yml
new file mode 100644
index 000000000000..4997628b6aa2
--- /dev/null
+++ b/changelog/3.9.0/kong/remove-datakit.yml
@@ -0,0 +1,2 @@
+message: "**Wasm**: Removed the experimental datakit Wasm filter"
+type: dependency
diff --git a/changelog/unreleased/kong/revert-http2-limitation-buffered-request.yml b/changelog/3.9.0/kong/revert-http2-limitation-buffered-request.yml
similarity index 100%
rename from changelog/unreleased/kong/revert-http2-limitation-buffered-request.yml
rename to changelog/3.9.0/kong/revert-http2-limitation-buffered-request.yml
diff --git a/changelog/unreleased/kong/wasm-filter-plugins.yml b/changelog/3.9.0/kong/wasm-filter-plugins.yml
similarity index 100%
rename from changelog/unreleased/kong/wasm-filter-plugins.yml
rename to changelog/3.9.0/kong/wasm-filter-plugins.yml

From f7f7a2349ecc96b505a6f3dac7f4e8fae84d004f Mon Sep 17 00:00:00 2001
From: Michael Martin
Date: Wed, 18 Dec 2024 06:45:38 -0800
Subject: [PATCH 11/18] fix(db): ensure that flattened_errors is encoded as a JSON array (#14019)

---
 .../kong/fix-error-flattening-json.yml        |    3 +
 kong/db/errors.lua                            |    6 +-
 kong/tools/cjson.lua                          |   19 +-
 .../05-error-flattening_spec.lua              | 2653 +++++++++++++++++
 4 files changed, 2677 insertions(+), 4 deletions(-)
 create mode 100644 changelog/unreleased/kong/fix-error-flattening-json.yml
 create mode 100644 spec/01-unit/01-db/01-schema/11-declarative_config/05-error-flattening_spec.lua

diff --git a/changelog/unreleased/kong/fix-error-flattening-json.yml b/changelog/unreleased/kong/fix-error-flattening-json.yml
new file mode 100644
index 000000000000..dbd50176123b
--- /dev/null
+++ b/changelog/unreleased/kong/fix-error-flattening-json.yml
@@ -0,0 +1,3 @@
+message: "Fixed an issue where `POST /config?flatten_errors=1` could return a JSON object instead of an empty array."
+type: bugfix
+scope: Core
diff --git a/kong/db/errors.lua b/kong/db/errors.lua
index a1f96cccd1bf..0ac400f824a4 100644
--- a/kong/db/errors.lua
+++ b/kong/db/errors.lua
@@ -3,6 +3,7 @@
 local pl_keys = require("pl.tablex").keys
 local nkeys = require("table.nkeys")
 local table_isarray = require("table.isarray")
 local uuid = require("kong.tools.uuid")
+local json = require("kong.tools.cjson")
 
 local type = type
@@ -21,6 +22,7 @@
 local concat = table.concat
 local sort = table.sort
 local insert = table.insert
 local remove = table.remove
+local new_array = json.new_array
 
 
 local sorted_keys = function(tbl)
@@ -720,7 +722,7 @@ do
   ---@param ns? string
table local function categorize_errors(errs, ns, flattened) - flattened = flattened or {} + flattened = flattened or new_array() for field, err in drain(errs) do local errtype = type(err) @@ -1020,7 +1022,7 @@ do ---@param input table ---@return table function flatten_errors(input, err_t) - local flattened = {} + local flattened = new_array() for entity_type, section_errors in drain(err_t) do if type(section_errors) ~= "table" then diff --git a/kong/tools/cjson.lua b/kong/tools/cjson.lua index 5ce04e1003e0..dff99fb6d8a2 100644 --- a/kong/tools/cjson.lua +++ b/kong/tools/cjson.lua @@ -1,6 +1,9 @@ local cjson = require "cjson.safe".new() local CJSON_MAX_PRECISION = require "kong.constants".CJSON_MAX_PRECISION +local new_tab = require("table.new") +local setmetatable = setmetatable +local array_mt = cjson.array_mt cjson.decode_array_with_array_mt(true) cjson.encode_sparse_array(nil, nil, 2^15) @@ -14,7 +17,19 @@ _M.encode = cjson.encode _M.decode_with_array_mt = cjson.decode -_M.array_mt = cjson.array_mt - +_M.array_mt = array_mt + +--- Creates a new table with the cjson array metatable. +--- +--- This ensures that the table will be encoded as a JSON array, even if it +--- is empty. +--- +---@param size? integer +---@return table +function _M.new_array(size) + local t = size and new_tab(size, 0) or {} + setmetatable(t, array_mt) + return t +end return _M diff --git a/spec/01-unit/01-db/01-schema/11-declarative_config/05-error-flattening_spec.lua b/spec/01-unit/01-db/01-schema/11-declarative_config/05-error-flattening_spec.lua new file mode 100644 index 000000000000..afac10a79644 --- /dev/null +++ b/spec/01-unit/01-db/01-schema/11-declarative_config/05-error-flattening_spec.lua @@ -0,0 +1,2653 @@ +local cjson = require("cjson") +local tablex = require("pl.tablex") + +local TESTS = { + { + input = { + config = { + _format_version = "3.0", + _transform = true, + certificates = { + { + cert = "-----BEGIN CERTIFICATE-----\ +MIICIzCCAYSgAwIBAgIUUMiD8e3GDZ+vs7XBmdXzMxARUrgwCgYIKoZIzj0EAwIw\ +IzENMAsGA1UECgwES29uZzESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIyMTIzMDA0\ +MDcwOFoXDTQyMTIyNTA0MDcwOFowIzENMAsGA1UECgwES29uZzESMBAGA1UEAwwJ\ +bG9jYWxob3N0MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBxSldGzzRAtjt825q\ +Uwl+BNgxecswnvbQFLiUDqJjVjCfs/B53xQfV97ddxsRymES2viC2kjAm1Ete4TH\ +CQmVltUBItHzI77HB+UsfqHoUdjl3lC/HC1yDSPBp5wd9eRRSagdl0eiJwnB9lof\ +MEnmOQLg177trb/YPz1vcCCZj7ikhzCjUzBRMB0GA1UdDgQWBBSUI6+CKqKFz/Te\ +ZJppMNl/Dh6d9DAfBgNVHSMEGDAWgBSUI6+CKqKFz/TeZJppMNl/Dh6d9DAPBgNV\ +HRMBAf8EBTADAQH/MAoGCCqGSM49BAMCA4GMADCBiAJCAZL3qX21MnGtQcl9yOMr\ +hNR54VrDKgqLR+ChU7/358n/sK/sVOjmrwVyQ52oUyqaQlfBQS2EufQVO/01+2sx\ +86gzAkIB/4Ilf4RluN2/gqHYlVEDRZzsqbwVJBHLeNKsZBSJkhNNpJBwa2Ndl9/i\ +u2tDk0KZFSAvRnqRAo9iDBUkIUI1ahA=\ +-----END CERTIFICATE-----", + key = "-----BEGIN EC PRIVATE KEY-----\ +MIHcAgEBBEIARPKnAYLB54bxBvkDfqV4NfZ+Mxl79rlaYRB6vbWVwFpy+E2pSZBR\ +doCy1tHAB/uPo+QJyjIK82Zwa3Kq0i1D2QigBwYFK4EEACOhgYkDgYYABAHFKV0b\ +PNEC2O3zbmpTCX4E2DF5yzCe9tAUuJQOomNWMJ+z8HnfFB9X3t13GxHKYRLa+ILa\ +SMCbUS17hMcJCZWW1QEi0fMjvscH5Sx+oehR2OXeUL8cLXINI8GnnB315FFJqB2X\ +R6InCcH2Wh8wSeY5AuDXvu2tv9g/PW9wIJmPuKSHMA==\ +-----END EC PRIVATE KEY-----", + tags = { + "certificate-01", + }, + }, + { + cert = "-----BEGIN CERTIFICATE-----\ +MIICIzCCAYSgAwIBAgIUUMiD8e3GDZ+vs7XBmdXzMxARUrgwCgYIKoZIzj0EAwIw\ +IzENMAsGA1UECgwES29uZzESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIyMTIzMDA0\ +MDcwOFoXDTQyohnoooooooooooooooooooooooooooooooooooooooooooasdfa\ +Uwl+BNgxecswnvbQFLiUDqJjVjCfs/B53xQfV97ddxsRymES2viC2kjAm1Ete4TH\ 
+CQmVltUBItHzI77AAAAAAAAAAAAAAAC/HC1yDSBBBBBBBBBBBBBdl0eiJwnB9lof\ +MEnmOQLg177trb/AAAAAAAAAAAAAAACjUzBRMBBBBBBBBBBBBBBUI6+CKqKFz/Te\ +ZJppMNl/Dh6d9DAAAAAAAAAAAAAAAASUI6+CKqBBBBBBBBBBBBB/Dh6d9DAPBgNV\ +HRMBAf8EBTADAQHAAAAAAAAAAAAAAAMCA4GMADBBBBBBBBBBBBB1MnGtQcl9yOMr\ +hNR54VrDKgqLR+CAAAAAAAAAAAAAAAjmrwVyQ5BBBBBBBBBBBBBEufQVO/01+2sx\ +86gzAkIB/4Ilf4RluN2/gqHYlVEDRZzsqbwVJBHLeNKsZBSJkhNNpJBwa2Ndl9/i\ +u2tDk0KZFSAvRnqRAo9iDBUkIUI1ahA=\ +-----END CERTIFICATE-----", + key = "-----BEGIN EC PRIVATE KEY-----\ +MIHcAgEBBEIARPKnAYLB54bxBvkDfqV4NfZ+Mxl79rlaYRB6vbWVwFpy+E2pSZBR\ +doCy1tHAB/uPo+QJyjIK82Zwa3Kq0i1D2QigBwYFK4EEACOhgYkDgYYABAHFKV0b\ +PNEC2O3zbmpTCX4E2DF5yzCe9tAUuJQOomNWMJ+z8HnfFB9X3t13GxHKYRLa+ILa\ +SMCbUS17hMcJCZWW1QEi0fMjvscH5Sx+oehR2OXeUL8cLXINI8GnnB315FFJqB2X\ +R6InCcH2Wh8wSeY5AuDXvu2tv9g/PW9wIJmPuKSHMA==\ +-----END EC PRIVATE KEY-----", + tags = { + "certificate-02", + }, + }, + }, + consumers = { + { + tags = { + "consumer-01", + }, + username = "valid_user", + }, + { + not_allowed = true, + tags = { + "consumer-02", + }, + username = "bobby_in_json_body", + }, + { + tags = { + "consumer-03", + }, + username = "super_valid_user", + }, + { + basicauth_credentials = { + { + password = "hard2guess", + tags = { + "basicauth_credentials-01", + "consumer-04", + }, + username = "superduper", + }, + { + extra_field = "NO!", + password = "12354", + tags = { + "basicauth_credentials-02", + "consumer-04", + }, + username = "dont-add-extra-fields-yo", + }, + }, + tags = { + "consumer-04", + }, + username = "credentials", + }, + }, + plugins = { + { + config = { + http_endpoint = "invalid::#//url", + }, + name = "http-log", + tags = { + "global_plugin-01", + }, + }, + }, + services = { + { + host = "localhost", + name = "nope", + port = 1234, + protocol = "nope", + routes = { + { + hosts = { + "test", + }, + methods = { + "GET", + }, + name = "valid.route", + protocols = { + "http", + "https", + }, + tags = { + "route_service-01", + "service-01", + }, + }, + { + name = "nope.route", + protocols = { + "tcp", + }, + tags = { + "route_service-02", + "service-01", + }, + }, + }, + tags = { + "service-01", + }, + }, + { + host = "localhost", + name = "mis-matched", + path = "/path", + protocol = "tcp", + routes = { + { + hosts = { + "test", + }, + methods = { + "GET", + }, + name = "invalid", + protocols = { + "http", + "https", + }, + tags = { + "route_service-03", + "service-02", + }, + }, + }, + tags = { + "service-02", + }, + }, + { + name = "okay", + routes = { + { + hosts = { + "test", + }, + methods = { + "GET", + }, + name = "probably-valid", + plugins = { + { + config = { + not_endpoint = "anything", + }, + name = "http-log", + tags = { + "route_service_plugin-01", + "route_service-04", + "service-03", + }, + }, + }, + protocols = { + "http", + "https", + }, + tags = { + "route_service-04", + "service-03", + }, + }, + }, + tags = { + "service-03", + }, + url = "http://localhost:1234", + }, + { + name = "bad-service-plugins", + plugins = { + { + config = {}, + name = "i-dont-exist", + tags = { + "service_plugin-01", + "service-04", + }, + }, + { + config = { + deeply = { + nested = { + undefined = true, + }, + }, + port = 1234, + }, + name = "tcp-log", + tags = { + "service_plugin-02", + "service-04", + }, + }, + }, + tags = { + "service-04", + }, + url = "http://localhost:1234", + }, + { + client_certificate = { + cert = "", + key = "", + tags = { + "service_client_certificate-01", + "service-05", + }, + }, + name = "bad-client-cert", + tags = { + "service-05", + }, + url = 
"https://localhost:1234", + }, + { + id = 123456, + name = "invalid-id", + tags = { + "service-06", + "invalid-id", + }, + url = "https://localhost:1234", + }, + { + name = "invalid-tags", + tags = { + "service-07", + "invalid-tags", + { + 1, + 2, + 3, + }, + true, + }, + url = "https://localhost:1234", + }, + { + name = "", + tags = { + "service-08", + "invalid_service_name-01", + }, + url = "https://localhost:1234", + }, + { + name = 1234, + tags = { + "service-09", + "invalid_service_name-02", + }, + url = "https://localhost:1234", + }, + }, + upstreams = { + { + hash_on = "ip", + name = "ok", + tags = { + "upstream-01", + }, + }, + { + hash_on = "ip", + healthchecks = { + active = { + concurrency = -1, + healthy = { + interval = 0, + successes = 0, + }, + http_path = "/", + https_sni = "example.com", + https_verify_certificate = true, + timeout = 1, + type = "http", + unhealthy = { + http_failures = 0, + interval = 0, + }, + }, + }, + host_header = 123, + name = "bad", + tags = { + "upstream-02", + }, + }, + { + name = "ok-bad-targets", + tags = { + "upstream-03", + }, + targets = { + { + tags = { + "upstream_target-01", + "upstream-03", + }, + target = "127.0.0.1:99", + }, + { + tags = { + "upstream_target-02", + "upstream-03", + }, + target = "hostname:1.0", + }, + }, + }, + }, + vaults = { + { + config = { + prefix = "SSL_", + }, + name = "env", + prefix = "test", + tags = { + "vault-01", + }, + }, + { + config = { + prefix = "SSL_", + }, + name = "vault-not-installed", + prefix = "env", + tags = { + "vault-02", + "vault-not-installed", + }, + }, + }, + }, + err_t = { + certificates = { + nil, + { + cert = "invalid certificate: x509.new: error:688010A:asn1 encoding routines:asn1_item_embed_d2i:nested asn1 error:asn1/tasn_dec.c:349:", + }, + }, + consumers = { + nil, + { + not_allowed = "unknown field", + }, + nil, + { + basicauth_credentials = { + nil, + { + extra_field = "unknown field", + }, + }, + }, + }, + plugins = { + { + config = { + http_endpoint = "missing host in url", + }, + }, + }, + services = { + { + protocol = "expected one of: grpc, grpcs, http, https, tcp, tls, tls_passthrough, udp", + routes = { + nil, + { + ["@entity"] = { + "must set one of 'sources', 'destinations', 'snis' when 'protocols' is 'tcp', 'tls' or 'udp'", + }, + }, + }, + }, + { + ["@entity"] = { + "failed conditional validation given value of field 'protocol'", + }, + path = "value must be null", + }, + { + routes = { + { + plugins = { + { + config = { + http_endpoint = "required field missing", + not_endpoint = "unknown field", + }, + }, + }, + }, + }, + }, + { + plugins = { + { + name = "plugin 'i-dont-exist' not enabled; add it to the 'plugins' configuration property", + }, + { + config = { + deeply = "unknown field", + host = "required field missing", + }, + }, + }, + }, + { + client_certificate = { + cert = "length must be at least 1", + key = "length must be at least 1", + }, + }, + { + id = "expected a string", + }, + { + tags = { + nil, + nil, + "expected a string", + "expected a string", + }, + }, + { + name = "length must be at least 1", + }, + { + name = "expected a string", + }, + }, + upstreams = { + nil, + { + healthchecks = { + active = { + concurrency = "value should be between 1 and 2147483648", + }, + }, + host_header = "expected a string", + }, + { + targets = { + nil, + { + target = "Invalid target ('hostname:1.0'); not a valid hostname or ip address", + }, + }, + }, + }, + vaults = { + nil, + { + name = "vault 'vault-not-installed' is not installed", + }, + }, + }, + }, + output 
= { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + config = { + prefix = "SSL_", + }, + name = "vault-not-installed", + prefix = "env", + tags = { + "vault-02", + "vault-not-installed", + }, + }, + entity_name = "vault-not-installed", + entity_tags = { + "vault-02", + "vault-not-installed", + }, + entity_type = "vault", + errors = { + { + field = "name", + message = "vault 'vault-not-installed' is not installed", + type = "field", + }, + }, + }, + { + entity = { + tags = { + "upstream_target-02", + "upstream-03", + }, + target = "hostname:1.0", + }, + entity_tags = { + "upstream_target-02", + "upstream-03", + }, + entity_type = "target", + errors = { + { + field = "target", + message = "Invalid target ('hostname:1.0'); not a valid hostname or ip address", + type = "field", + }, + }, + }, + { + entity = { + hash_on = "ip", + healthchecks = { + active = { + concurrency = -1, + healthy = { + interval = 0, + successes = 0, + }, + http_path = "/", + https_sni = "example.com", + https_verify_certificate = true, + timeout = 1, + type = "http", + unhealthy = { + http_failures = 0, + interval = 0, + }, + }, + }, + host_header = 123, + name = "bad", + tags = { + "upstream-02", + }, + }, + entity_name = "bad", + entity_tags = { + "upstream-02", + }, + entity_type = "upstream", + errors = { + { + field = "host_header", + message = "expected a string", + type = "field", + }, + { + field = "healthchecks.active.concurrency", + message = "value should be between 1 and 2147483648", + type = "field", + }, + }, + }, + { + entity = { + name = 1234, + tags = { + "service-09", + "invalid_service_name-02", + }, + url = "https://localhost:1234", + }, + entity_tags = { + "service-09", + "invalid_service_name-02", + }, + entity_type = "service", + errors = { + { + field = "name", + message = "expected a string", + type = "field", + }, + }, + }, + { + entity = { + name = "", + tags = { + "service-08", + "invalid_service_name-01", + }, + url = "https://localhost:1234", + }, + entity_tags = { + "service-08", + "invalid_service_name-01", + }, + entity_type = "service", + errors = { + { + field = "name", + message = "length must be at least 1", + type = "field", + }, + }, + }, + { + entity = { + name = "invalid-tags", + tags = { + "service-07", + "invalid-tags", + { + 1, + 2, + 3, + }, + true, + }, + url = "https://localhost:1234", + }, + entity_name = "invalid-tags", + entity_type = "service", + errors = { + { + field = "tags.4", + message = "expected a string", + type = "field", + }, + { + field = "tags.3", + message = "expected a string", + type = "field", + }, + }, + }, + { + entity = { + id = 123456, + name = "invalid-id", + tags = { + "service-06", + "invalid-id", + }, + url = "https://localhost:1234", + }, + entity_name = "invalid-id", + entity_tags = { + "service-06", + "invalid-id", + }, + entity_type = "service", + errors = { + { + field = "id", + message = "expected a string", + type = "field", + }, + }, + }, + { + entity = { + cert = "", + key = "", + tags = { + "service_client_certificate-01", + "service-05", + }, + }, + entity_tags = { + "service_client_certificate-01", + "service-05", + }, + entity_type = "certificate", + errors = { + { + field = "key", + message = "length must be at least 1", + type = "field", + }, + { + field = "cert", + message = "length must be at least 1", + type = "field", + }, + }, + }, + { + entity = { + config = {}, + name = "i-dont-exist", + tags = { + "service_plugin-01", + "service-04", + }, + }, + entity_name = "i-dont-exist", + entity_tags 
= { + "service_plugin-01", + "service-04", + }, + entity_type = "plugin", + errors = { + { + field = "name", + message = "plugin 'i-dont-exist' not enabled; add it to the 'plugins' configuration property", + type = "field", + }, + }, + }, + { + entity = { + config = { + deeply = { + nested = { + undefined = true, + }, + }, + port = 1234, + }, + name = "tcp-log", + tags = { + "service_plugin-02", + "service-04", + }, + }, + entity_name = "tcp-log", + entity_tags = { + "service_plugin-02", + "service-04", + }, + entity_type = "plugin", + errors = { + { + field = "config.host", + message = "required field missing", + type = "field", + }, + { + field = "config.deeply", + message = "unknown field", + type = "field", + }, + }, + }, + { + entity = { + config = { + not_endpoint = "anything", + }, + name = "http-log", + tags = { + "route_service_plugin-01", + "route_service-04", + "service-03", + }, + }, + entity_name = "http-log", + entity_tags = { + "route_service_plugin-01", + "route_service-04", + "service-03", + }, + entity_type = "plugin", + errors = { + { + field = "config.not_endpoint", + message = "unknown field", + type = "field", + }, + { + field = "config.http_endpoint", + message = "required field missing", + type = "field", + }, + }, + }, + { + entity = { + host = "localhost", + name = "mis-matched", + path = "/path", + protocol = "tcp", + tags = { + "service-02", + }, + }, + entity_name = "mis-matched", + entity_tags = { + "service-02", + }, + entity_type = "service", + errors = { + { + field = "path", + message = "value must be null", + type = "field", + }, + { + message = "failed conditional validation given value of field 'protocol'", + type = "entity", + }, + }, + }, + { + entity = { + name = "nope.route", + protocols = { + "tcp", + }, + tags = { + "route_service-02", + "service-01", + }, + }, + entity_name = "nope.route", + entity_tags = { + "route_service-02", + "service-01", + }, + entity_type = "route", + errors = { + { + message = "must set one of 'sources', 'destinations', 'snis' when 'protocols' is 'tcp', 'tls' or 'udp'", + type = "entity", + }, + }, + }, + { + entity = { + host = "localhost", + name = "nope", + port = 1234, + protocol = "nope", + tags = { + "service-01", + }, + }, + entity_name = "nope", + entity_tags = { + "service-01", + }, + entity_type = "service", + errors = { + { + field = "protocol", + message = "expected one of: grpc, grpcs, http, https, tcp, tls, tls_passthrough, udp", + type = "field", + }, + }, + }, + { + entity = { + config = { + http_endpoint = "invalid::#//url", + }, + name = "http-log", + tags = { + "global_plugin-01", + }, + }, + entity_name = "http-log", + entity_tags = { + "global_plugin-01", + }, + entity_type = "plugin", + errors = { + { + field = "config.http_endpoint", + message = "missing host in url", + type = "field", + }, + }, + }, + { + entity = { + extra_field = "NO!", + password = "12354", + tags = { + "basicauth_credentials-02", + "consumer-04", + }, + username = "dont-add-extra-fields-yo", + }, + entity_tags = { + "basicauth_credentials-02", + "consumer-04", + }, + entity_type = "basicauth_credential", + errors = { + { + field = "extra_field", + message = "unknown field", + type = "field", + }, + }, + }, + { + entity = { + not_allowed = true, + tags = { + "consumer-02", + }, + username = "bobby_in_json_body", + }, + entity_tags = { + "consumer-02", + }, + entity_type = "consumer", + errors = { + { + field = "not_allowed", + message = "unknown field", + type = "field", + }, + }, + }, + { + entity = { + cert = "-----BEGIN 
CERTIFICATE-----\ +MIICIzCCAYSgAwIBAgIUUMiD8e3GDZ+vs7XBmdXzMxARUrgwCgYIKoZIzj0EAwIw\ +IzENMAsGA1UECgwES29uZzESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIyMTIzMDA0\ +MDcwOFoXDTQyohnoooooooooooooooooooooooooooooooooooooooooooasdfa\ +Uwl+BNgxecswnvbQFLiUDqJjVjCfs/B53xQfV97ddxsRymES2viC2kjAm1Ete4TH\ +CQmVltUBItHzI77AAAAAAAAAAAAAAAC/HC1yDSBBBBBBBBBBBBBdl0eiJwnB9lof\ +MEnmOQLg177trb/AAAAAAAAAAAAAAACjUzBRMBBBBBBBBBBBBBBUI6+CKqKFz/Te\ +ZJppMNl/Dh6d9DAAAAAAAAAAAAAAAASUI6+CKqBBBBBBBBBBBBB/Dh6d9DAPBgNV\ +HRMBAf8EBTADAQHAAAAAAAAAAAAAAAMCA4GMADBBBBBBBBBBBBB1MnGtQcl9yOMr\ +hNR54VrDKgqLR+CAAAAAAAAAAAAAAAjmrwVyQ5BBBBBBBBBBBBBEufQVO/01+2sx\ +86gzAkIB/4Ilf4RluN2/gqHYlVEDRZzsqbwVJBHLeNKsZBSJkhNNpJBwa2Ndl9/i\ +u2tDk0KZFSAvRnqRAo9iDBUkIUI1ahA=\ +-----END CERTIFICATE-----", + key = "-----BEGIN EC PRIVATE KEY-----\ +MIHcAgEBBEIARPKnAYLB54bxBvkDfqV4NfZ+Mxl79rlaYRB6vbWVwFpy+E2pSZBR\ +doCy1tHAB/uPo+QJyjIK82Zwa3Kq0i1D2QigBwYFK4EEACOhgYkDgYYABAHFKV0b\ +PNEC2O3zbmpTCX4E2DF5yzCe9tAUuJQOomNWMJ+z8HnfFB9X3t13GxHKYRLa+ILa\ +SMCbUS17hMcJCZWW1QEi0fMjvscH5Sx+oehR2OXeUL8cLXINI8GnnB315FFJqB2X\ +R6InCcH2Wh8wSeY5AuDXvu2tv9g/PW9wIJmPuKSHMA==\ +-----END EC PRIVATE KEY-----", + tags = { + "certificate-02", + }, + }, + entity_tags = { + "certificate-02", + }, + entity_type = "certificate", + errors = { + { + field = "cert", + message = "invalid certificate: x509.new: error:688010A:asn1 encoding routines:asn1_item_embed_d2i:nested asn1 error:asn1/tasn_dec.c:349:", + type = "field", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + upstreams = { + { + hash_on = "ip", + healthchecks = { + active = { + concurrency = -1, + healthy = { + interval = 0, + successes = 0, + }, + http_path = "/", + https_sni = "example.com", + https_verify_certificate = true, + timeout = 1, + type = "http", + unhealthy = { + http_failures = 0, + interval = 0, + }, + }, + }, + host_header = 123, + name = "bad", + tags = { + "upstream-01", + }, + }, + }, + }, + err_t = { + upstreams = { + { + healthchecks = { + active = { + concurrency = "value should be between 1 and 2147483648", + }, + }, + host_header = "expected a string", + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + hash_on = "ip", + healthchecks = { + active = { + concurrency = -1, + healthy = { + interval = 0, + successes = 0, + }, + http_path = "/", + https_sni = "example.com", + https_verify_certificate = true, + timeout = 1, + type = "http", + unhealthy = { + http_failures = 0, + interval = 0, + }, + }, + }, + host_header = 123, + name = "bad", + tags = { + "upstream-01", + }, + }, + entity_name = "bad", + entity_tags = { + "upstream-01", + }, + entity_type = "upstream", + errors = { + { + field = "host_header", + message = "expected a string", + type = "field", + }, + { + field = "healthchecks.active.concurrency", + message = "value should be between 1 and 2147483648", + type = "field", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + services = { + { + client_certificate = { + cert = "", + key = "", + tags = { + "service_client_certificate-01", + "service-01", + }, + }, + name = "bad-client-cert", + plugins = { + { + config = {}, + name = "i-do-not-exist", + tags = { + "service_plugin-01", + }, + }, + }, + routes = { + { + 
hosts = { + "test", + }, + paths = { + "/", + }, + plugins = { + { + config = { + a = { + b = { + c = "def", + }, + }, + }, + name = "http-log", + tags = { + "route_service_plugin-01", + }, + }, + }, + protocols = { + "http", + }, + tags = { + "service_route-01", + }, + }, + { + hosts = { + "invalid", + }, + paths = { + "/", + }, + protocols = { + "nope", + }, + tags = { + "service_route-02", + }, + }, + }, + tags = { + "service-01", + }, + url = "https://localhost:1234", + }, + }, + }, + err_t = { + services = { + { + client_certificate = { + cert = "length must be at least 1", + key = "length must be at least 1", + }, + plugins = { + { + name = "plugin 'i-do-not-exist' not enabled; add it to the 'plugins' configuration property", + }, + }, + routes = { + { + plugins = { + { + config = { + a = "unknown field", + http_endpoint = "required field missing", + }, + }, + }, + }, + { + protocols = "unknown type: nope", + }, + }, + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + config = {}, + name = "i-do-not-exist", + tags = { + "service_plugin-01", + }, + }, + entity_name = "i-do-not-exist", + entity_tags = { + "service_plugin-01", + }, + entity_type = "plugin", + errors = { + { + field = "name", + message = "plugin 'i-do-not-exist' not enabled; add it to the 'plugins' configuration property", + type = "field", + }, + }, + }, + { + entity = { + cert = "", + key = "", + tags = { + "service_client_certificate-01", + "service-01", + }, + }, + entity_tags = { + "service_client_certificate-01", + "service-01", + }, + entity_type = "certificate", + errors = { + { + field = "key", + message = "length must be at least 1", + type = "field", + }, + { + field = "cert", + message = "length must be at least 1", + type = "field", + }, + }, + }, + { + entity = { + config = { + a = { + b = { + c = "def", + }, + }, + }, + name = "http-log", + tags = { + "route_service_plugin-01", + }, + }, + entity_name = "http-log", + entity_tags = { + "route_service_plugin-01", + }, + entity_type = "plugin", + errors = { + { + field = "config.http_endpoint", + message = "required field missing", + type = "field", + }, + { + field = "config.a", + message = "unknown field", + type = "field", + }, + }, + }, + { + entity = { + hosts = { + "invalid", + }, + paths = { + "/", + }, + protocols = { + "nope", + }, + tags = { + "service_route-02", + }, + }, + entity_tags = { + "service_route-02", + }, + entity_type = "route", + errors = { + { + field = "protocols", + message = "unknown type: nope", + type = "field", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + consumers = { + { + basicauth_credentials = { + { + id = "089091f4-cb8b-48f5-8463-8319097be716", + password = "pwd", + tags = { + "consumer-01-credential-01", + }, + username = "user-01", + }, + { + id = "b1443d61-ccd9-4359-b82a-f37515442295", + password = "pwd", + tags = { + "consumer-01-credential-02", + }, + username = "user-11", + }, + { + id = "2603d010-edbe-4713-94ef-145e281cbf4c", + password = "pwd", + tags = { + "consumer-01-credential-03", + }, + username = "user-02", + }, + { + id = "760cf441-613c-48a2-b377-36aebc9f9ed0", + password = "pwd", + tags = { + "consumer-01-credential-04", + }, + username = "user-11", + }, + }, + id = "cdac30ee-cd7e-465c-99b6-84f3e4e17015", + tags = { + "consumer-01", + }, + username = "consumer-01", + }, + { + 
basicauth_credentials = { + { + id = "d0cd1919-bb07-4c85-b407-f33feb74f6e2", + password = "pwd", + tags = { + "consumer-02-credential-01", + }, + username = "user-99", + }, + }, + id = "c0c021f5-dae1-4031-bcf6-42e3c4d9ced9", + tags = { + "consumer-02", + }, + username = "consumer-02", + }, + { + basicauth_credentials = { + { + id = "7e8bcd10-cdcd-41f1-8c4d-9790d34aa67d", + password = "pwd", + tags = { + "consumer-03-credential-01", + }, + username = "user-01", + }, + { + id = "7fe186bd-44e5-4b97-854d-85a24929889d", + password = "pwd", + tags = { + "consumer-03-credential-02", + }, + username = "user-33", + }, + { + id = "6547c325-5332-41fc-a954-d4972926cdb5", + password = "pwd", + tags = { + "consumer-03-credential-03", + }, + username = "user-02", + }, + { + id = "e091a955-9ee1-4403-8d0a-a7f1f8bafaca", + password = "pwd", + tags = { + "consumer-03-credential-04", + }, + username = "user-33", + }, + }, + id = "9acb0270-73aa-4968-b9e4-a4924e4aced5", + tags = { + "consumer-03", + }, + username = "consumer-03", + }, + }, + }, + err_t = { + consumers = { + { + basicauth_credentials = { + nil, + nil, + nil, + "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-11' already declared", + }, + }, + nil, + { + basicauth_credentials = { + "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-01' already declared", + nil, + "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-02' already declared", + "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-33' already declared", + }, + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + consumer = { + id = "9acb0270-73aa-4968-b9e4-a4924e4aced5", + }, + id = "7e8bcd10-cdcd-41f1-8c4d-9790d34aa67d", + password = "pwd", + tags = { + "consumer-03-credential-01", + }, + username = "user-01", + }, + entity_id = "7e8bcd10-cdcd-41f1-8c4d-9790d34aa67d", + entity_tags = { + "consumer-03-credential-01", + }, + entity_type = "basicauth_credential", + errors = { + { + message = "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-01' already declared", + type = "entity", + }, + }, + }, + { + entity = { + consumer = { + id = "9acb0270-73aa-4968-b9e4-a4924e4aced5", + }, + id = "6547c325-5332-41fc-a954-d4972926cdb5", + password = "pwd", + tags = { + "consumer-03-credential-03", + }, + username = "user-02", + }, + entity_id = "6547c325-5332-41fc-a954-d4972926cdb5", + entity_tags = { + "consumer-03-credential-03", + }, + entity_type = "basicauth_credential", + errors = { + { + message = "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-02' already declared", + type = "entity", + }, + }, + }, + { + entity = { + consumer = { + id = "9acb0270-73aa-4968-b9e4-a4924e4aced5", + }, + id = "e091a955-9ee1-4403-8d0a-a7f1f8bafaca", + password = "pwd", + tags = { + "consumer-03-credential-04", + }, + username = "user-33", + }, + entity_id = "e091a955-9ee1-4403-8d0a-a7f1f8bafaca", + entity_tags = { + "consumer-03-credential-04", + }, + entity_type = "basicauth_credential", + errors = { + { + message = "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-33' already declared", + type = "entity", + }, + }, + }, + { + entity = { + consumer = { + id = "cdac30ee-cd7e-465c-99b6-84f3e4e17015", + }, + id = "760cf441-613c-48a2-b377-36aebc9f9ed0", + password = "pwd", + tags = { + "consumer-01-credential-04", + }, + username = "user-11", + }, 
+ entity_id = "760cf441-613c-48a2-b377-36aebc9f9ed0", + entity_tags = { + "consumer-01-credential-04", + }, + entity_type = "basicauth_credential", + errors = { + { + message = "uniqueness violation: 'basicauth_credentials' entity with username set to 'user-11' already declared", + type = "entity", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + services = { + { + host = "localhost", + id = "0175e0e8-3de9-56b4-96f1-b12dcb4b6691", + name = "nope", + port = 1234, + protocol = "nope", + tags = { + "service-01", + }, + }, + }, + }, + err_t = { + services = { + { + protocol = "expected one of: grpc, grpcs, http, https, tcp, tls, tls_passthrough, udp", + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + host = "localhost", + id = "0175e0e8-3de9-56b4-96f1-b12dcb4b6691", + name = "nope", + port = 1234, + protocol = "nope", + tags = { + "service-01", + }, + }, + entity_id = "0175e0e8-3de9-56b4-96f1-b12dcb4b6691", + entity_name = "nope", + entity_tags = { + "service-01", + }, + entity_type = "service", + errors = { + { + field = "protocol", + message = "expected one of: grpc, grpcs, http, https, tcp, tls, tls_passthrough, udp", + type = "field", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + services = { + { + host = "localhost", + id = "cb019421-62c2-47a8-b714-d7567b114037", + name = "test", + port = 1234, + protocol = "nope", + routes = { + { + super_duper_invalid = true, + tags = { + "route-01", + }, + }, + }, + tags = { + "service-01", + }, + }, + }, + }, + err_t = { + services = { + { + protocol = "expected one of: grpc, grpcs, http, https, tcp, tls, tls_passthrough, udp", + routes = { + { + ["@entity"] = { + "must set one of 'methods', 'hosts', 'headers', 'paths', 'snis' when 'protocols' is 'https'", + }, + super_duper_invalid = "unknown field", + }, + }, + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + service = { + id = "cb019421-62c2-47a8-b714-d7567b114037", + }, + super_duper_invalid = true, + tags = { + "route-01", + }, + }, + entity_tags = { + "route-01", + }, + entity_type = "route", + errors = { + { + field = "super_duper_invalid", + message = "unknown field", + type = "field", + }, + { + message = "must set one of 'methods', 'hosts', 'headers', 'paths', 'snis' when 'protocols' is 'https'", + type = "entity", + }, + }, + }, + { + entity = { + host = "localhost", + id = "cb019421-62c2-47a8-b714-d7567b114037", + name = "test", + port = 1234, + protocol = "nope", + tags = { + "service-01", + }, + }, + entity_id = "cb019421-62c2-47a8-b714-d7567b114037", + entity_name = "test", + entity_tags = { + "service-01", + }, + entity_type = "service", + errors = { + { + field = "protocol", + message = "expected one of: grpc, grpcs, http, https, tcp, tls, tls_passthrough, udp", + type = "field", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + services = { + { + id = 1234, + name = false, + tags = { + "service-01", + { + 1.5, + }, + }, + url = "http://localhost:1234", + }, + }, + }, + err_t = { 
+ services = { + { + id = "expected a string", + name = "expected a string", + tags = { + nil, + "expected a string", + }, + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + id = 1234, + name = false, + tags = { + "service-01", + { + 1.5, + }, + }, + url = "http://localhost:1234", + }, + entity_type = "service", + errors = { + { + field = "tags.2", + message = "expected a string", + type = "field", + }, + { + field = "name", + message = "expected a string", + type = "field", + }, + { + field = "id", + message = "expected a string", + type = "field", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + abnormal_extra_field = 123, + services = { + { + host = "localhost", + name = "nope", + port = 1234, + protocol = "nope", + routes = { + { + hosts = { + "test", + }, + methods = { + "GET", + }, + name = "valid.route", + protocols = { + "http", + "https", + }, + tags = { + "route_service-01", + "service-01", + }, + }, + { + name = "nope.route", + protocols = { + "tcp", + }, + tags = { + "route_service-02", + "service-01", + }, + }, + }, + tags = { + "service-01", + }, + }, + { + host = "localhost", + name = "mis-matched", + path = "/path", + protocol = "tcp", + routes = { + { + hosts = { + "test", + }, + methods = { + "GET", + }, + name = "invalid", + protocols = { + "http", + "https", + }, + tags = { + "route_service-03", + "service-02", + }, + }, + }, + tags = { + "service-02", + }, + }, + { + name = "okay", + routes = { + { + hosts = { + "test", + }, + methods = { + "GET", + }, + name = "probably-valid", + plugins = { + { + config = { + not_endpoint = "anything", + }, + name = "http-log", + tags = { + "route_service_plugin-01", + "route_service-04", + "service-03", + }, + }, + }, + protocols = { + "http", + "https", + }, + tags = { + "route_service-04", + "service-03", + }, + }, + }, + tags = { + "service-03", + }, + url = "http://localhost:1234", + }, + }, + }, + err_t = { + abnormal_extra_field = "unknown field", + services = { + { + protocol = "expected one of: grpc, grpcs, http, https, tcp, tls, tls_passthrough, udp", + routes = { + nil, + { + ["@entity"] = { + "must set one of 'sources', 'destinations', 'snis' when 'protocols' is 'tcp', 'tls' or 'udp'", + }, + }, + }, + }, + { + ["@entity"] = { + "failed conditional validation given value of field 'protocol'", + }, + path = "value must be null", + }, + { + routes = { + { + plugins = { + { + config = { + http_endpoint = "required field missing", + not_endpoint = "unknown field", + }, + }, + }, + }, + }, + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = { + abnormal_extra_field = "unknown field", + }, + flattened_errors = { + { + entity = { + config = { + not_endpoint = "anything", + }, + name = "http-log", + tags = { + "route_service_plugin-01", + "route_service-04", + "service-03", + }, + }, + entity_name = "http-log", + entity_tags = { + "route_service_plugin-01", + "route_service-04", + "service-03", + }, + entity_type = "plugin", + errors = { + { + field = "config.not_endpoint", + message = "unknown field", + type = "field", + }, + { + field = "config.http_endpoint", + message = "required field missing", + type = "field", + }, + }, + }, + { + entity = { + host = "localhost", + name = "mis-matched", + path = "/path", + protocol = "tcp", + tags = { + "service-02", + }, + }, + entity_name = 
"mis-matched", + entity_tags = { + "service-02", + }, + entity_type = "service", + errors = { + { + field = "path", + message = "value must be null", + type = "field", + }, + { + message = "failed conditional validation given value of field 'protocol'", + type = "entity", + }, + }, + }, + { + entity = { + name = "nope.route", + protocols = { + "tcp", + }, + tags = { + "route_service-02", + "service-01", + }, + }, + entity_name = "nope.route", + entity_tags = { + "route_service-02", + "service-01", + }, + entity_type = "route", + errors = { + { + message = "must set one of 'sources', 'destinations', 'snis' when 'protocols' is 'tcp', 'tls' or 'udp'", + type = "entity", + }, + }, + }, + { + entity = { + host = "localhost", + name = "nope", + port = 1234, + protocol = "nope", + tags = { + "service-01", + }, + }, + entity_name = "nope", + entity_tags = { + "service-01", + }, + entity_type = "service", + errors = { + { + field = "protocol", + message = "expected one of: grpc, grpcs, http, https, tcp, tls, tls_passthrough, udp", + type = "field", + }, + }, + }, + }, + message = "declarative config is invalid: {abnormal_extra_field=\"unknown field\"}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + consumers = { + { + acls = { + { + group = "app", + tags = { + "k8s-name:app-acl", + "k8s-namespace:default", + "k8s-kind:Secret", + "k8s-uid:f1c5661c-a087-4c4b-b545-2d8b3870d661", + "k8s-version:v1", + }, + }, + }, + basicauth_credentials = { + { + password = "6ef728de-ba68-4e59-acb9-6e502c28ae0b", + tags = { + "k8s-name:app-cred", + "k8s-namespace:default", + "k8s-kind:Secret", + "k8s-uid:aadd4598-2969-49ea-82ac-6ab5159e2f2e", + "k8s-version:v1", + }, + username = "774f8446-6427-43f9-9962-ce7ab8097fe4", + }, + }, + id = "68d5de9f-2211-5ed8-b827-22f57a492d0f", + tags = { + "k8s-name:app", + "k8s-namespace:default", + "k8s-kind:KongConsumer", + "k8s-uid:7ee19bea-72d5-402b-bf0f-f57bf81032bf", + "k8s-group:configuration.konghq.com", + "k8s-version:v1", + }, + username = "774f8446-6427-43f9-9962-ce7ab8097fe4", + }, + }, + plugins = { + { + config = { + error_code = 429, + error_message = "API rate limit exceeded", + fault_tolerant = true, + hide_client_headers = false, + limit_by = "consumer", + policy = "local", + second = 2000, + }, + consumer = "774f8446-6427-43f9-9962-ce7ab8097fe4", + enabled = true, + name = "rate-limiting", + protocols = { + "grpc", + "grpcs", + "http", + "https", + }, + tags = { + "k8s-name:nginx-sample-1-rate", + "k8s-namespace:default", + "k8s-kind:KongPlugin", + "k8s-uid:5163972c-543d-48ae-b0f6-21701c43c1ff", + "k8s-group:configuration.konghq.com", + "k8s-version:v1", + }, + }, + { + config = { + error_code = 429, + error_message = "API rate limit exceeded", + fault_tolerant = true, + hide_client_headers = false, + limit_by = "consumer", + policy = "local", + second = 2000, + }, + consumer = "774f8446-6427-43f9-9962-ce7ab8097fe4", + enabled = true, + name = "rate-limiting", + protocols = { + "grpc", + "grpcs", + "http", + "https", + }, + tags = { + "k8s-name:nginx-sample-2-rate", + "k8s-namespace:default", + "k8s-kind:KongPlugin", + "k8s-uid:89fa1cd1-78da-4c3e-8c3b-32be1811535a", + "k8s-group:configuration.konghq.com", + "k8s-version:v1", + }, + }, + { + config = { + allow = { + "nginx-sample-1", + "app", + }, + hide_groups_header = false, + }, + enabled = true, + name = "acl", + protocols = { + "grpc", + "grpcs", + "http", + "https", + }, + service = 
"default.nginx-sample-1.nginx-sample-1.80", + tags = { + "k8s-name:nginx-sample-1", + "k8s-namespace:default", + "k8s-kind:KongPlugin", + "k8s-uid:b9373482-32e1-4ac3-bd2a-8926ab728700", + "k8s-group:configuration.konghq.com", + "k8s-version:v1", + }, + }, + }, + services = { + { + connect_timeout = 60000, + host = "nginx-sample-1.default.80.svc", + id = "8c17ab3e-b6bd-51b2-b5ec-878b4d608b9d", + name = "default.nginx-sample-1.nginx-sample-1.80", + path = "/", + port = 80, + protocol = "http", + read_timeout = 60000, + retries = 5, + routes = { + { + https_redirect_status_code = 426, + id = "84d45463-1faa-55cf-8ef6-4285007b715e", + methods = { + "GET", + }, + name = "default.nginx-sample-1.nginx-sample-1..80", + path_handling = "v0", + paths = { + "/sample/1", + }, + preserve_host = true, + protocols = { + "http", + "https", + }, + regex_priority = 0, + request_buffering = true, + response_buffering = true, + strip_path = false, + tags = { + "k8s-name:nginx-sample-1", + "k8s-namespace:default", + "k8s-kind:Ingress", + "k8s-uid:916a6e5a-eebe-4527-a78d-81963eb3e043", + "k8s-group:networking.k8s.io", + "k8s-version:v1", + }, + }, + }, + tags = { + "k8s-name:nginx-sample-1", + "k8s-namespace:default", + "k8s-kind:Service", + "k8s-uid:f7cc87f4-d5f7-41f8-b4e3-70608017e588", + "k8s-version:v1", + }, + write_timeout = 60000, + }, + }, + upstreams = { + { + algorithm = "round-robin", + name = "nginx-sample-1.default.80.svc", + tags = { + "k8s-name:nginx-sample-1", + "k8s-namespace:default", + "k8s-kind:Service", + "k8s-uid:f7cc87f4-d5f7-41f8-b4e3-70608017e588", + "k8s-version:v1", + }, + targets = { + { + target = "nginx-sample-1.default.svc:80", + }, + }, + }, + }, + }, + err_t = { + plugins = { + { + consumer = { + id = "missing primary key", + }, + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + config = { + error_code = 429, + error_message = "API rate limit exceeded", + fault_tolerant = true, + hide_client_headers = false, + limit_by = "consumer", + policy = "local", + second = 2000, + }, + consumer = "774f8446-6427-43f9-9962-ce7ab8097fe4", + enabled = true, + name = "rate-limiting", + protocols = { + "grpc", + "grpcs", + "http", + "https", + }, + tags = { + "k8s-name:nginx-sample-1-rate", + "k8s-namespace:default", + "k8s-kind:KongPlugin", + "k8s-uid:5163972c-543d-48ae-b0f6-21701c43c1ff", + "k8s-group:configuration.konghq.com", + "k8s-version:v1", + }, + }, + entity_name = "rate-limiting", + entity_tags = { + "k8s-name:nginx-sample-1-rate", + "k8s-namespace:default", + "k8s-kind:KongPlugin", + "k8s-uid:5163972c-543d-48ae-b0f6-21701c43c1ff", + "k8s-group:configuration.konghq.com", + "k8s-version:v1", + }, + entity_type = "plugin", + errors = { + { + field = "consumer.id", + message = "missing primary key", + type = "field", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + consumers = { + { + id = "a73dc9a7-93df-584d-97c0-7f41a1bbce3d", + tags = { + "consumer-1", + }, + username = "test-consumer-1", + }, + { + id = "a73dc9a7-93df-584d-97c0-7f41a1bbce32", + tags = { + "consumer-2", + }, + username = "test-consumer-1", + }, + }, + }, + err_t = { + consumers = { + nil, + "uniqueness violation: 'consumers' entity with username set to 'test-consumer-1' already declared", + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + 
id = "a73dc9a7-93df-584d-97c0-7f41a1bbce32", + tags = { + "consumer-2", + }, + username = "test-consumer-1", + }, + entity_id = "a73dc9a7-93df-584d-97c0-7f41a1bbce32", + entity_tags = { + "consumer-2", + }, + entity_type = "consumer", + errors = { + { + message = "uniqueness violation: 'consumers' entity with username set to 'test-consumer-1' already declared", + type = "entity", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, + + { + input = { + config = { + _format_version = "3.0", + _transform = true, + services = { + { + connect_timeout = 60000, + host = "httproute.default.httproute-testing.0", + id = "4e3cb785-a8d0-5866-aa05-117f7c64f24d", + name = "httproute.default.httproute-testing.0", + port = 8080, + protocol = "http", + read_timeout = 60000, + retries = 5, + routes = { + { + https_redirect_status_code = 426, + id = "073fc413-1c03-50b4-8f44-43367c13daba", + name = "httproute.default.httproute-testing.0.0", + path_handling = "v0", + paths = { + "~/httproute-testing$", + "/httproute-testing/", + }, + preserve_host = true, + protocols = { + "http", + "https", + }, + strip_path = true, + tags = {}, + }, + }, + tags = {}, + write_timeout = 60000, + }, + }, + upstreams = { + { + algorithm = "round-robin", + id = "e9792964-6797-482c-bfdf-08220a4f6832", + name = "httproute.default.httproute-testing.0", + tags = { + "k8s-name:httproute-testing", + "k8s-namespace:default", + "k8s-kind:HTTPRoute", + "k8s-uid:f9792964-6797-482c-bfdf-08220a4f6839", + "k8s-group:gateway.networking.k8s.io", + "k8s-version:v1", + }, + targets = { + { + id = "715f9482-4236-5fe5-9ae5-e75c1a498940", + target = "10.244.0.11:80", + weight = 1, + }, + { + id = "89a2966d-773c-580a-b063-6ab4dfd24701", + target = "10.244.0.12:80", + weight = 1, + }, + }, + }, + { + algorithm = "round-robin", + id = "f9792964-6797-482c-bfdf-08220a4f6839", + name = "httproute.default.httproute-testing.1", + tags = { + "k8s-name:httproute-testing", + "k8s-namespace:default", + "k8s-kind:HTTPRoute", + "k8s-uid:f9792964-6797-482c-bfdf-08220a4f6839", + "k8s-group:gateway.networking.k8s.io", + "k8s-version:v1", + }, + targets = { + { + id = "48322e4a-b3b0-591b-8ed6-fd95a6d75019", + tags = { + "target-1", + }, + target = "10.244.0.12:80", + weight = 1, + }, + { + id = "48322e4a-b3b0-591b-8ed6-fd95a6d75019", + tags = { + "target-2", + }, + target = "10.244.0.12:80", + weight = 1, + }, + }, + }, + }, + }, + err_t = { + upstreams = { + nil, + { + targets = { + nil, + "uniqueness violation: 'targets' entity with primary key set to '48322e4a-b3b0-591b-8ed6-fd95a6d75019' already declared", + }, + }, + }, + }, + }, + output = { + err_t = { + code = 14, + fields = {}, + flattened_errors = { + { + entity = { + id = "48322e4a-b3b0-591b-8ed6-fd95a6d75019", + tags = { + "target-2", + }, + target = "10.244.0.12:80", + upstream = { + id = "f9792964-6797-482c-bfdf-08220a4f6839", + }, + weight = 1, + }, + entity_id = "48322e4a-b3b0-591b-8ed6-fd95a6d75019", + entity_tags = { + "target-2", + }, + entity_type = "target", + errors = { + { + message = "uniqueness violation: 'targets' entity with primary key set to '48322e4a-b3b0-591b-8ed6-fd95a6d75019' already declared", + type = "entity", + }, + }, + }, + }, + message = "declarative config is invalid: {}", + name = "invalid declarative configuration", + }, + }, + }, +} + +describe("kong.db.errors.declarative_config_flattened()", function() + local errors + + lazy_setup(function() + -- required to initialize _G.kong for the 
kong.db.errors module + require("spec.helpers") + errors = require("kong.db.errors") + end) + + it("flattens dbless errors into a single array", function() + local function find_err(needle, haystack) + for i = 1, #haystack do + local err = haystack[i] + + if err.entity_type == needle.entity_type + and err.entity_name == needle.entity_name + and err.entity_id == needle.entity_id + and tablex.deepcompare(err.entity_tags, needle.entity_tags, true) + then + return table.remove(haystack, i) + end + end + end + + for _, elem in ipairs(TESTS) do + local exp = elem.output.err_t + local got = errors:declarative_config_flattened(elem.input.err_t, elem.input.config) + + local missing = {} + for _, err in ipairs(exp.flattened_errors) do + local found = find_err(err, got.flattened_errors) + if found then + assert.same(err, found) + else + table.insert(missing, err) + end + end + + for _, err in ipairs(missing) do + assert.is_nil(err) + end + + assert.equals(0, #got.flattened_errors) + end + + end) + + it("retains errors that it does not understand how to flatten", function() + local input = { foo = { [2] = "some error" } } + local err_t = errors:declarative_config_flattened(input, {}) + assert.equals(0, #err_t.flattened_errors) + assert.same(input, err_t.fields) + end) + + it("ensures that `flattened_errors` encodes to a JSON array when empty", function() + local err_t = errors:declarative_config_flattened({}, {}) + assert.is_table(err_t) + local flattened_errors = assert.is_table(err_t.flattened_errors) + assert.equals(0, #flattened_errors) + assert.same(cjson.array_mt, debug.getmetatable(flattened_errors)) + assert.equals("[]", cjson.encode(flattened_errors)) + end) + + it("throws for invalid inputs", function() + assert.has_error(function() + errors:declarative_config_flattened() + end) + + assert.has_error(function() + errors:declarative_config_flattened(1, 2) + end) + + assert.has_error(function() + errors:declarative_config_flattened({}, 123) + end) + + assert.has_error(function() + errors:declarative_config_flattened(123, {}) + end) + end) +end) From 705860c3dd3dc84305a91fd57f7bb405dc4816ec Mon Sep 17 00:00:00 2001 From: Jesse Miller Date: Thu, 19 Dec 2024 08:11:25 -0800 Subject: [PATCH 12/18] docs(readme): add references to Konnect in getting started guide --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 225de00991f2..59fd90221f12 100644 --- a/README.md +++ b/README.md @@ -11,12 +11,14 @@ Kong runs natively on Kubernetes thanks to its official [Kubernetes Ingress Cont --- -[Installation](https://konghq.com/install/#kong-community) | [Documentation](https://docs.konghq.com) | [Discussions](https://github.com/Kong/kong/discussions) | [Forum](https://discuss.konghq.com) | [Blog](https://konghq.com/blog) | [Builds][kong-master-builds] +[Installation](https://konghq.com/install/#kong-community) | [Documentation](https://docs.konghq.com) | [Discussions](https://github.com/Kong/kong/discussions) | [Forum](https://discuss.konghq.com) | [Blog](https://konghq.com/blog) | [Builds][kong-master-builds] | [Cloud Hosted Kong](https://konghq.com/kong-konnect/) --- ## Getting Started +If you prefer to use a cloud-hosted Kong, you can [sign up for a free trial of Kong Konnect](https://konghq.com/products/kong-konnect/register?utm_medium=Referral&utm_source=Github&utm_campaign=kong-gateway&utm_content=konnect-promo-in-gateway&utm_term=get-started) and get started in minutes. 
If not, you can follow the instructions below to get started with Kong on your own infrastructure. + Let’s test drive Kong by adding authentication to an API in under 5 minutes. We suggest using the docker-compose distribution via the instructions below, but there is also a [docker installation](https://docs.konghq.com/gateway/latest/install/docker/#install-kong-gateway-in-db-less-mode) procedure if you’d prefer to run the Kong API Gateway in DB-less mode. From 14b1bf96dc37326bcca06eab15ae0bdec74cf013 Mon Sep 17 00:00:00 2001 From: kurt Date: Fri, 20 Dec 2024 10:46:54 +0800 Subject: [PATCH 13/18] chore(deps): bump lua-kong-nginx-module to 0.14.0 (#14037) Invalidate the cached header in `variable_index` during `req.set_header` to ensure fresh retrieval of `ngx.var.http_*` values. Signed-off-by: tzssangglass --- .requirements | 2 +- changelog/unreleased/kong/bump-lua-kong-nginx-module-0140.yml | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/kong/bump-lua-kong-nginx-module-0140.yml diff --git a/.requirements b/.requirements index 1d1a21751553..e7e18b6d1e6e 100644 --- a/.requirements +++ b/.requirements @@ -15,7 +15,7 @@ LIBEXPAT_SHA256=d4cf38d26e21a56654ffe4acd9cd5481164619626802328506a2869afab29ab3 # Note: git repositories can be loaded from local path if path is set as value -LUA_KONG_NGINX_MODULE=3eb89666f84348fa0599d4e0a29ccf89511e8b75 # 0.13.0 +LUA_KONG_NGINX_MODULE=f85f92191fb98dbeec614a418d46b008f6a107ce # 0.14.0 LUA_RESTY_LMDB=9da0e9f3313960d06e2d8e718b7ac494faa500f1 # 1.6.0 LUA_RESTY_EVENTS=bc85295b7c23eda2dbf2b4acec35c93f77b26787 # 0.3.1 LUA_RESTY_SIMDJSON=7e6466ce91b2bc763b45701a4f055e94b1e8143b # 1.1.0 diff --git a/changelog/unreleased/kong/bump-lua-kong-nginx-module-0140.yml b/changelog/unreleased/kong/bump-lua-kong-nginx-module-0140.yml new file mode 100644 index 000000000000..a36146f00f5b --- /dev/null +++ b/changelog/unreleased/kong/bump-lua-kong-nginx-module-0140.yml @@ -0,0 +1,3 @@ +message: Bump lua-kong-nginx-module from 0.13.0 to 0.14.0 +type: dependency +scope: Core From ab888cc7951277b522e4393c43b38968cd79ae1d Mon Sep 17 00:00:00 2001 From: Chrono Date: Sat, 21 Dec 2024 06:31:30 +0800 Subject: [PATCH 14/18] feat(clustering/rpc): emit event when RPC becomes ready (#14034) This removes the artificial race condition of sleeping a few seconds before starting the sync poll, resulting in cleaner code and fewer error logs.
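The mechanism, in brief: when the RPC handshake completes, the node posts a local worker event (source `clustering:jsonrpc`, event `connected`) carrying the peer's capability list, and the sync services subscribe to that event instead of sleeping and polling. Below is a minimal, illustrative sketch of that flow; the `bus` table is a hypothetical stand-in (the real code uses `kong.worker_events.register()` / `post_local()`, which require an OpenResty worker), so this only models the decision logic, not the Kong implementation:

```lua
-- Hypothetical stand-in for kong.worker_events: a tiny in-process bus.
-- NOT a Kong API -- it only mimics register()/post_local() semantics
-- so the capability-check logic below can run in plain Lua.
local bus = { handlers = {} }

-- register a callback for a (source, event) pair
function bus.register(cb, source, event)
  local key = source .. "/" .. event
  bus.handlers[key] = bus.handlers[key] or {}
  table.insert(bus.handlers[key], cb)
end

-- deliver an event to subscribers in this process only
function bus.post_local(source, event, data)
  for _, cb in ipairs(bus.handlers[source .. "/" .. event] or {}) do
    cb(data)
  end
  return true
end

-- DP side: once the CP's capabilities are known, pick rpc sync (v2) or
-- the v1 fallback. Mirrors the handler added to data_plane.lua below.
local inited = false

bus.register(function(capabilities_list)
  if inited then return end -- decide only once per worker

  local has_sync_v2 = false
  for _, cap in ipairs(capabilities_list) do
    if cap == "kong.sync.v2" then
      has_sync_v2 = true
      break
    end
  end

  inited = true

  if has_sync_v2 then
    print("CP supports kong.sync.v2; starting rpc sync")
  else
    print("falling back to sync v1: CP lacks kong.sync.v2")
  end
end, "clustering:jsonrpc", "connected")

-- Handshake side: announce readiness with the negotiated capabilities.
-- Mirrors the post_local() call added to rpc/manager.lua below.
bus.post_local("clustering:jsonrpc", "connected", { "kong.meta.v1" })
--> falling back to sync v1: CP lacks kong.sync.v2

-- A later duplicate event is ignored thanks to the `inited` guard.
bus.post_local("clustering:jsonrpc", "connected", { "kong.sync.v2" })
```

Because the decision now happens on a real readiness signal rather than a timer, the DP neither races the handshake nor logs spurious errors while waiting.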
KAG-5895 KAG-5891 --- kong/clustering/data_plane.lua | 48 ++++++- kong/clustering/rpc/manager.lua | 10 ++ kong/clustering/services/sync/init.lua | 35 ++++- kong/clustering/utils.lua | 1 + kong/init.lua | 10 +- .../15-cp_inert_rpc_sync_spec.lua | 124 ++++++++++++++++++ 6 files changed, 214 insertions(+), 14 deletions(-) create mode 100644 spec/02-integration/09-hybrid_mode/15-cp_inert_rpc_sync_spec.lua diff --git a/kong/clustering/data_plane.lua b/kong/clustering/data_plane.lua index 63e566863981..87ee27355867 100644 --- a/kong/clustering/data_plane.lua +++ b/kong/clustering/data_plane.lua @@ -80,10 +80,50 @@ function _M:init_worker(basic_info) self.plugins_list = basic_info.plugins self.filters = basic_info.filters - -- only run in process which worker_id() == 0 - assert(ngx.timer.at(0, function(premature) - self:communicate(premature) - end)) + local function start_communicate() + assert(ngx.timer.at(0, function(premature) + self:communicate(premature) + end)) + end + + -- rpc sync is not configured + if not kong.sync then + start_communicate() + return + end + + local worker_events = assert(kong.worker_events) + + -- once rpc is ready, check the CP's capabilities and decide how to sync + worker_events.register(function(capabilities_list) + -- we only check once + if self.inited then + return + end + + local has_sync_v2 + + -- check cp's capabilities + for _, v in ipairs(capabilities_list) do + if v == "kong.sync.v2" then + has_sync_v2 = true + break + end + end + + -- cp supports kong.sync.v2 + if has_sync_v2 then + return + end + + ngx_log(ngx_WARN, "sync v1 is enabled because rpc sync cannot work.") + + self.inited = true + + -- only run in process which worker_id() == 0 + start_communicate() + + end, "clustering:jsonrpc", "connected") end diff --git a/kong/clustering/rpc/manager.lua b/kong/clustering/rpc/manager.lua index ea5c4f5a2822..eefb1aabb6c5 100644 --- a/kong/clustering/rpc/manager.lua +++ b/kong/clustering/rpc/manager.lua @@ -270,6 +270,16 @@ function _M:_meta_call(c, meta_cap, node_id) list = capabilities_list, } + -- tell the rest of the system that rpc is ready + local worker_events = assert(kong.worker_events) + + -- notify this worker + local ok, err = worker_events.post_local("clustering:jsonrpc", "connected", + capabilities_list) + if not ok then + ngx_log(ngx_ERR, _log_prefix, "unable to post rpc connected event: ", err) + end + return true end diff --git a/kong/clustering/services/sync/init.lua b/kong/clustering/services/sync/init.lua index b64699d34017..188a003c9392 100644 --- a/kong/clustering/services/sync/init.lua +++ b/kong/clustering/services/sync/init.lua @@ -53,10 +53,39 @@ function _M:init_worker() return end - -- sync to CP ASAP - assert(self.rpc:sync_once(FIRST_SYNC_DELAY)) + local worker_events = assert(kong.worker_events) - assert(self.rpc:sync_every(EACH_SYNC_DELAY)) + -- once rpc is ready, start syncing + worker_events.register(function(capabilities_list) + -- we only check once + if self.inited then + return + end + + local has_sync_v2 + + -- check cp's capabilities + for _, v in ipairs(capabilities_list) do + if v == "kong.sync.v2" then + has_sync_v2 = true + break + end + end + + -- cp does not support kong.sync.v2 + if not has_sync_v2 then + ngx.log(ngx.WARN, "rpc sync is disabled in CP.") + return + end + + self.inited = true + + -- sync to CP ASAP + assert(self.rpc:sync_once(FIRST_SYNC_DELAY)) + + assert(self.rpc:sync_every(EACH_SYNC_DELAY)) + + end, "clustering:jsonrpc", "connected") end diff --git a/kong/clustering/utils.lua b/kong/clustering/utils.lua index
ee34e7dce2e4..f05be2353fbc 100644 --- a/kong/clustering/utils.lua +++ b/kong/clustering/utils.lua @@ -161,6 +161,7 @@ end function _M.is_dp_worker_process() if kong.configuration.role == "data_plane" + and not kong.sync -- privileged agent is only enabled when rpc sync is off and kong.configuration.dedicated_config_processing == true then return process_type() == "privileged agent" end diff --git a/kong/init.lua b/kong/init.lua index 731540f80720..8c4d45142214 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -763,6 +763,7 @@ function Kong.init() require("resty.kong.var").patch_metatable() + -- NOTE: privileged_agent is disabled when rpc sync is on if config.dedicated_config_processing and is_data_plane(config) and not kong.sync then -- TODO: figure out if there is better value than 4096 -- 4096 is for the cocurrency of the lua-resty-timer-ng @@ -881,19 +882,14 @@ function Kong.init_worker() end if kong.clustering then - local is_cp = is_control_plane(kong.configuration) - local is_dp_sync_v1 = is_data_plane(kong.configuration) and not kong.sync local using_dedicated = kong.configuration.dedicated_config_processing -- CP needs to support both v1 and v2 sync - -- v1 sync is only enabled for DP if v2 sync is disabled - if is_cp or is_dp_sync_v1 then - kong.clustering:init_worker() - end + -- v1 sync is only enabled for DP if v2 sync is unavailable + kong.clustering:init_worker() -- see is_dp_worker_process() in clustering/utils.lua if using_dedicated and process.type() == "privileged agent" then - assert(not is_cp) return end end diff --git a/spec/02-integration/09-hybrid_mode/15-cp_inert_rpc_sync_spec.lua b/spec/02-integration/09-hybrid_mode/15-cp_inert_rpc_sync_spec.lua new file mode 100644 index 000000000000..0eb5e76a8ce3 --- /dev/null +++ b/spec/02-integration/09-hybrid_mode/15-cp_inert_rpc_sync_spec.lua @@ -0,0 +1,124 @@ +local helpers = require "spec.helpers" +local cjson = require("cjson.safe") +local CLUSTERING_SYNC_STATUS = require("kong.constants").CLUSTERING_SYNC_STATUS + +for _, strategy in helpers.each_strategy() do + +describe("CP disabled Sync RPC #" ..
strategy, function() + + lazy_setup(function() + helpers.get_db_utils(strategy, { + "clustering_data_planes", + }) -- runs migrations + + assert(helpers.start_kong({ + role = "control_plane", + cluster_cert = "spec/fixtures/kong_clustering.crt", + cluster_cert_key = "spec/fixtures/kong_clustering.key", + database = strategy, + cluster_listen = "127.0.0.1:9005", + nginx_conf = "spec/fixtures/custom_nginx.template", + nginx_worker_processes = 2, -- multiple workers + + cluster_rpc = "on", -- CP ENABLE rpc + cluster_rpc_sync = "off", -- CP DISABLE rpc sync + })) + + assert(helpers.start_kong({ + role = "data_plane", + database = "off", + prefix = "servroot2", + cluster_cert = "spec/fixtures/kong_clustering.crt", + cluster_cert_key = "spec/fixtures/kong_clustering.key", + cluster_control_plane = "127.0.0.1:9005", + proxy_listen = "0.0.0.0:9002", + nginx_conf = "spec/fixtures/custom_nginx.template", + nginx_worker_processes = 2, -- multiple workers + + cluster_rpc = "on", -- DP ENABLE rpc + cluster_rpc_sync = "on", -- DP ENABLE rpc sync + })) + end) + + lazy_teardown(function() + helpers.stop_kong("servroot2") + helpers.stop_kong() + end) + + after_each(function() + helpers.clean_logfile("servroot2/logs/error.log") + helpers.clean_logfile() + end) + + describe("works", function() + it("shows DP status", function() + helpers.wait_until(function() + local admin_client = helpers.admin_client() + finally(function() + admin_client:close() + end) + + local res = assert(admin_client:get("/clustering/data-planes")) + local body = assert.res_status(200, res) + local json = cjson.decode(body) + + for _, v in pairs(json.data) do + if v.ip == "127.0.0.1" then + assert.near(14 * 86400, v.ttl, 3) + assert.matches("^(%d+%.%d+)%.%d+", v.version) + assert.equal(CLUSTERING_SYNC_STATUS.NORMAL, v.sync_status) + return true + end + end + end, 10) + + -- cp will not run rpc + assert.logfile().has.no.line("[rpc]", true) + + -- dp will not run rpc too + assert.logfile("servroot2/logs/error.log").has.line( + "rpc sync is disabled in CP") + assert.logfile("servroot2/logs/error.log").has.line( + "sync v1 is enabled due to rpc sync can not work.") + end) + end) + + describe("sync works", function() + it("proxy on DP follows CP config", function() + local admin_client = helpers.admin_client(10000) + finally(function() + admin_client:close() + end) + + local res = assert(admin_client:post("/services", { + body = { name = "mockbin-service", url = "https://127.0.0.1:15556/request", }, + headers = {["Content-Type"] = "application/json"} + })) + assert.res_status(201, res) + + res = assert(admin_client:post("/services/mockbin-service/routes", { + body = { paths = { "/" }, }, + headers = {["Content-Type"] = "application/json"} + })) + + helpers.wait_until(function() + local proxy_client = helpers.http_client("127.0.0.1", 9002) + + res = proxy_client:send({ + method = "GET", + path = "/", + }) + + local status = res and res.status + proxy_client:close() + if status == 200 then + return true + end + end, 10) + end) + end) + + +end) + +end -- for _, strategy From d07425b0a7b5a3e7e29f719dd068f826e36829d9 Mon Sep 17 00:00:00 2001 From: Chrono Date: Mon, 23 Dec 2024 14:01:26 +0800 Subject: [PATCH 15/18] refactor(clustering/rpc): simplify the implementation (#14026) KAG-6036 --- kong-3.10.0-0.rockspec | 1 + kong/clustering/services/sync/hooks.lua | 58 +-- kong/clustering/services/sync/rpc.lua | 53 +-- .../services/sync/strategies/postgres.lua | 75 +--- kong/conf_loader/constants.lua | 1 - kong/db/migrations/core/024_380_to_390.lua | 9 
- kong/db/migrations/core/025_390_to_3100.lua | 12 + kong/templates/kong_defaults.lua | 1 - .../09-hybrid_mode/01-sync_spec.lua | 6 + .../09-hybrid_mode/08-lazy_export_spec.lua | 1 - .../19-incrmental_sync/01-sync_spec.lua | 341 ------------------ .../02-multiple_dp_nodes_spec.lua | 113 ------ .../migrations/core/024_380_to_390_spec.lua | 9 - .../migrations/core/025_390_to_3100_spec.lua | 7 + 14 files changed, 35 insertions(+), 652 deletions(-) create mode 100644 kong/db/migrations/core/025_390_to_3100.lua delete mode 100644 spec/02-integration/19-incrmental_sync/01-sync_spec.lua delete mode 100644 spec/02-integration/19-incrmental_sync/02-multiple_dp_nodes_spec.lua create mode 100644 spec/05-migration/db/migrations/core/025_390_to_3100_spec.lua diff --git a/kong-3.10.0-0.rockspec b/kong-3.10.0-0.rockspec index 22e19b4fefe9..9aafa5c0c0a2 100644 --- a/kong-3.10.0-0.rockspec +++ b/kong-3.10.0-0.rockspec @@ -341,6 +341,7 @@ build = { ["kong.db.migrations.core.022_350_to_360"] = "kong/db/migrations/core/022_350_to_360.lua", ["kong.db.migrations.core.023_360_to_370"] = "kong/db/migrations/core/023_360_to_370.lua", ["kong.db.migrations.core.024_380_to_390"] = "kong/db/migrations/core/024_380_to_390.lua", + ["kong.db.migrations.core.025_390_to_3100"] = "kong/db/migrations/core/025_390_to_3100.lua", ["kong.db.migrations.operations.200_to_210"] = "kong/db/migrations/operations/200_to_210.lua", ["kong.db.migrations.operations.212_to_213"] = "kong/db/migrations/operations/212_to_213.lua", ["kong.db.migrations.operations.280_to_300"] = "kong/db/migrations/operations/280_to_300.lua", diff --git a/kong/clustering/services/sync/hooks.lua b/kong/clustering/services/sync/hooks.lua index a9368f755061..ddc36d6ccfaa 100644 --- a/kong/clustering/services/sync/hooks.lua +++ b/kong/clustering/services/sync/hooks.lua @@ -7,7 +7,6 @@ local EMPTY = require("kong.tools.table").EMPTY local ipairs = ipairs -local ngx_null = ngx.null local ngx_log = ngx.log local ngx_ERR = ngx.ERR local ngx_DEBUG = ngx.DEBUG @@ -74,29 +73,8 @@ function _M:notify_all_nodes() end -local function gen_delta(entity, name, options, ws_id, is_delete) - -- composite key, like { id = ... 
} - local schema = kong.db[name].schema - local pk = schema:extract_pk_values(entity) - - assert(schema:validate_primary_key(pk)) - - local delta = { - type = name, - pk = pk, - ws_id = ws_id, - entity = is_delete and ngx_null or entity, - } - - return delta -end - - function _M:entity_delta_writer(entity, name, options, ws_id, is_delete) - local d = gen_delta(entity, name, options, ws_id, is_delete) - local deltas = { d, } - - local res, err = self.strategy:insert_delta(deltas) + local res, err = self.strategy:insert_delta() if not res then self.strategy:cancel_txn() return nil, err @@ -164,39 +142,7 @@ function _M:register_dao_hooks() ngx_log(ngx_DEBUG, "[kong.sync.v2] new delta due to deleting ", name) - -- set lmdb value to ngx_null then return entity - - local d = gen_delta(entity, name, options, ws_id, true) - local deltas = { d, } - - -- delete other related entities - for i, item in ipairs(cascade_entries or EMPTY) do - local e = item.entity - local name = item.dao.schema.name - - ngx_log(ngx_DEBUG, "[kong.sync.v2] new delta due to cascade deleting ", name) - - d = gen_delta(e, name, options, e.ws_id, true) - - -- #1 item is initial entity - deltas[i + 1] = d - end - - local res, err = self.strategy:insert_delta(deltas) - if not res then - self.strategy:cancel_txn() - return nil, err - end - - res, err = self.strategy:commit_txn() - if not res then - self.strategy:cancel_txn() - return nil, err - end - - self:notify_all_nodes() - - return entity -- for other hooks + return self:entity_delta_writer(entity, name, options, ws_id) end local dao_hooks = { diff --git a/kong/clustering/services/sync/rpc.lua b/kong/clustering/services/sync/rpc.lua index 006a8c4bb439..b6525c3ee04a 100644 --- a/kong/clustering/services/sync/rpc.lua +++ b/kong/clustering/services/sync/rpc.lua @@ -26,14 +26,9 @@ local fmt = string.format local ngx_null = ngx.null local ngx_log = ngx.log local ngx_ERR = ngx.ERR -local ngx_INFO = ngx.INFO local ngx_DEBUG = ngx.DEBUG --- number of versions behind before a full sync is forced -local DEFAULT_FULL_SYNC_THRESHOLD = 512 - - function _M.new(strategy) local self = { strategy = strategy, @@ -43,8 +38,8 @@ function _M.new(strategy) end -local function inc_sync_result(res) - return { default = { deltas = res, wipe = false, }, } +local function empty_sync_result() + return { default = { deltas = {}, wipe = false, }, } end @@ -62,10 +57,6 @@ end function _M:init_cp(manager) local purge_delay = manager.conf.cluster_data_plane_purge_delay - -- number of versions behind before a full sync is forced - local FULL_SYNC_THRESHOLD = manager.conf.cluster_full_sync_threshold or - DEFAULT_FULL_SYNC_THRESHOLD - -- CP -- Method: kong.sync.v2.get_delta -- Params: versions: list of current versions of the database @@ -107,48 +98,12 @@ function _M:init_cp(manager) return nil, err end - -- is the node empty? If so, just do a full sync to bring it up to date faster if default_namespace_version == 0 or - latest_version - default_namespace_version > FULL_SYNC_THRESHOLD - then - -- we need to full sync because holes are found - - ngx_log(ngx_INFO, - "[kong.sync.v2] database is empty or too far behind for node_id: ", node_id, - ", current_version: ", default_namespace_version, - ", forcing a full sync") - + default_namespace_version < latest_version then return full_sync_result() end - -- do we need an incremental sync? 
-
-  local res, err = self.strategy:get_delta(default_namespace_version)
-  if not res then
-    return nil, err
-  end
-
-  if isempty(res) then
-    -- node is already up to date
-    return inc_sync_result(res)
-  end
-
-  -- some deltas are returned, are they contiguous?
-  if res[1].version == default_namespace_version + 1 then
-    -- doesn't wipe dp lmdb, incremental sync
-    return inc_sync_result(res)
-  end
-
-  -- we need to full sync because holes are found
-  -- in the delta, meaning the oldest version is no longer
-  -- available
-
-  ngx_log(ngx_INFO,
-          "[kong.sync.v2] delta for node_id no longer available: ", node_id,
-          ", current_version: ", default_namespace_version,
-          ", forcing a full sync")
-
-  return full_sync_result()
+  return empty_sync_result()
 end)
 end

diff --git a/kong/clustering/services/sync/strategies/postgres.lua b/kong/clustering/services/sync/strategies/postgres.lua
index 1ef758e2c1d0..7bc0784c6e61 100644
--- a/kong/clustering/services/sync/strategies/postgres.lua
+++ b/kong/clustering/services/sync/strategies/postgres.lua
@@ -2,19 +2,7 @@ local _M = {}
 local _MT = { __index = _M }

-local cjson = require("cjson.safe")
-local buffer = require("string.buffer")
-
-
-local string_format = string.format
-local cjson_encode = cjson.encode
 local ngx_null = ngx.null
-local ngx_log = ngx.log
-local ngx_ERR = ngx.ERR
-
-
-local KEEP_VERSION_COUNT = 100
-local CLEANUP_TIME_DELAY = 3600 -- 1 hour

 function _M.new(db)
@@ -26,32 +14,8 @@ function _M.new(db)
 end

-local PURGE_QUERY = [[
-  DELETE FROM clustering_sync_version
-  WHERE "version" < (
-      SELECT MAX("version") - %d
-      FROM clustering_sync_version
-  );
-]]
-
-
+-- reserved for future use
+function _M:init_worker()
-  local function cleanup_handler(premature)
-    if premature then
-      return
-    end
-
-    local res, err = self.connector:query(string_format(PURGE_QUERY, KEEP_VERSION_COUNT))
-    if not res then
-      ngx_log(ngx_ERR,
-              "[incremental] unable to purge old data from incremental delta table, err: ",
-              err)
-
-      return
-    end
-  end
-
-  assert(ngx.timer.every(CLEANUP_TIME_DELAY, cleanup_handler))
 end

@@ -61,37 +25,12 @@ local NEW_VERSION_QUERY = [[
     new_version integer;
   BEGIN
     INSERT INTO clustering_sync_version DEFAULT VALUES RETURNING version INTO new_version;
-    INSERT INTO clustering_sync_delta (version, type, pk, ws_id, entity) VALUES %s;
   END
 $$;
 ]]

---- deltas: {
----   { type = "service", "pk" = { id = "d78eb00f..." }, "ws_id" = "73478cf6...", entity = "JSON", }
----   { type = "route", "pk" = { id = "0a5bac5c..." }, "ws_id" = "73478cf6...", entity = "JSON", }
---- }
-function _M:insert_delta(deltas)
-  local buf = buffer.new()
-
-  local count = #deltas
-  for i = 1, count do
-    local d = deltas[i]
-
-    buf:putf("(new_version, %s, %s, %s, %s)",
-             self.connector:escape_literal(d.type),
-             self.connector:escape_literal(cjson_encode(d.pk)),
-             self.connector:escape_literal(d.ws_id or kong.default_workspace),
-             self.connector:escape_literal(cjson_encode(d.entity)))
-
-    -- sql values should be separated by comma
-    if i < count then
-      buf:put(",")
-    end
-  end
-
-  local sql = string_format(NEW_VERSION_QUERY, buf:get())
-
-  return self.connector:query(sql)
+function _M:insert_delta()
+  return self.connector:query(NEW_VERSION_QUERY)
 end

@@ -112,14 +51,6 @@ function _M:get_latest_version()
 end

-function _M:get_delta(version)
-  local sql = "SELECT * FROM clustering_sync_delta" ..
-              " WHERE version > " .. self.connector:escape_literal(version) ..
-              " ORDER BY version ASC"
-  return self.connector:query(sql)
-end
-
-
 function _M:begin_txn()
   return self.connector:query("BEGIN;")
 end

diff --git a/kong/conf_loader/constants.lua b/kong/conf_loader/constants.lua
index 2e3a27b31b57..f0fd9c0c0310 100644
--- a/kong/conf_loader/constants.lua
+++ b/kong/conf_loader/constants.lua
@@ -514,7 +514,6 @@ local CONF_PARSERS = {
   cluster_dp_labels = { typ = "array" },
   cluster_rpc = { typ = "boolean" },
   cluster_rpc_sync = { typ = "boolean" },
-  cluster_full_sync_threshold = { typ = "number" },
   cluster_cjson = { typ = "boolean" },

   kic = { typ = "boolean" },

diff --git a/kong/db/migrations/core/024_380_to_390.lua b/kong/db/migrations/core/024_380_to_390.lua
index b433500f7edc..39e4dc5af2e3 100644
--- a/kong/db/migrations/core/024_380_to_390.lua
+++ b/kong/db/migrations/core/024_380_to_390.lua
@@ -6,15 +6,6 @@ return {
         CREATE TABLE IF NOT EXISTS clustering_sync_version (
           "version" SERIAL PRIMARY KEY
         );
-        CREATE TABLE IF NOT EXISTS clustering_sync_delta (
-          "version" INT NOT NULL,
-          "type" TEXT NOT NULL,
-          "pk" JSON NOT NULL,
-          "ws_id" UUID NOT NULL,
-          "entity" JSON,
-          FOREIGN KEY (version) REFERENCES clustering_sync_version(version) ON DELETE CASCADE
-        );
-        CREATE INDEX IF NOT EXISTS clustering_sync_delta_version_idx ON clustering_sync_delta (version);
       END;
      $$;
    ]]

diff --git a/kong/db/migrations/core/025_390_to_3100.lua b/kong/db/migrations/core/025_390_to_3100.lua
new file mode 100644
index 000000000000..8af90e46c83c
--- /dev/null
+++ b/kong/db/migrations/core/025_390_to_3100.lua
@@ -0,0 +1,12 @@
+return {
+  postgres = {
+    up = [[
+      DO $$
+      BEGIN
+        DROP TABLE IF EXISTS clustering_sync_delta;
+        DROP INDEX IF EXISTS clustering_sync_delta_version_idx;
+      END;
+      $$;
+    ]]
+  }
+}

diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua
index 83ef5f95eb3c..68f3863b5f42 100644
--- a/kong/templates/kong_defaults.lua
+++ b/kong/templates/kong_defaults.lua
@@ -43,7 +43,6 @@ cluster_use_proxy = off
 cluster_dp_labels = NONE
 cluster_rpc = off
 cluster_rpc_sync = off
-cluster_full_sync_threshold = 512
 cluster_cjson = off

 lmdb_environment_path = dbless.lmdb

diff --git a/spec/02-integration/09-hybrid_mode/01-sync_spec.lua b/spec/02-integration/09-hybrid_mode/01-sync_spec.lua
index bb941bd4ed30..4877869c4a23 100644
--- a/spec/02-integration/09-hybrid_mode/01-sync_spec.lua
+++ b/spec/02-integration/09-hybrid_mode/01-sync_spec.lua
@@ -732,6 +732,12 @@ describe("CP/DP config sync #" .. strategy .. " rpc_sync=" .. rpc_sync, function
         end
       end, 5)

+      -- TODO: this may cause flakiness
+      -- wait for rpc sync to finish
+      if rpc_sync == "on" then
+        ngx.sleep(0.5)
+      end
+
       for i = 5, 2, -1 do
         res = proxy_client:get("/" .. i)
         assert.res_status(404, res)

diff --git a/spec/02-integration/09-hybrid_mode/08-lazy_export_spec.lua b/spec/02-integration/09-hybrid_mode/08-lazy_export_spec.lua
index bc235385c6b5..14230c89f2f6 100644
--- a/spec/02-integration/09-hybrid_mode/08-lazy_export_spec.lua
+++ b/spec/02-integration/09-hybrid_mode/08-lazy_export_spec.lua
@@ -93,7 +93,6 @@ describe("lazy_export with #".. strategy .. " rpc_sync=" ..
rpc_sync, function() touch_config() if rpc_sync == "on" then assert.logfile().has.line("[kong.sync.v2] config push (connected client)", true) - assert.logfile().has.line("[kong.sync.v2] database is empty or too far behind for node_id", true) else assert.logfile().has.line("[clustering] exporting config", true) diff --git a/spec/02-integration/19-incrmental_sync/01-sync_spec.lua b/spec/02-integration/19-incrmental_sync/01-sync_spec.lua deleted file mode 100644 index a608e6432edb..000000000000 --- a/spec/02-integration/19-incrmental_sync/01-sync_spec.lua +++ /dev/null @@ -1,341 +0,0 @@ -local helpers = require "spec.helpers" -local cjson = require("cjson.safe") - -local function test_url(path, port, code, headers) - helpers.wait_until(function() - local proxy_client = helpers.http_client("127.0.0.1", port) - - local res = proxy_client:send({ - method = "GET", - path = path, - headers = headers, - }) - - local status = res and res.status - proxy_client:close() - if status == code then - return true - end - end, 10) -end - -for _, strategy in helpers.each_strategy() do - -describe("Incremental Sync RPC #" .. strategy, function() - - lazy_setup(function() - helpers.get_db_utils(strategy, { - "clustering_data_planes", - }) -- runs migrations - - assert(helpers.start_kong({ - role = "control_plane", - cluster_cert = "spec/fixtures/kong_clustering.crt", - cluster_cert_key = "spec/fixtures/kong_clustering.key", - database = strategy, - cluster_listen = "127.0.0.1:9005", - nginx_conf = "spec/fixtures/custom_nginx.template", - cluster_rpc = "on", - cluster_rpc_sync = "on", -- rpc sync - })) - - assert(helpers.start_kong({ - role = "data_plane", - database = "off", - prefix = "servroot2", - cluster_cert = "spec/fixtures/kong_clustering.crt", - cluster_cert_key = "spec/fixtures/kong_clustering.key", - cluster_control_plane = "127.0.0.1:9005", - proxy_listen = "0.0.0.0:9002", - nginx_conf = "spec/fixtures/custom_nginx.template", - nginx_worker_processes = 4, -- multiple workers - cluster_rpc = "on", - cluster_rpc_sync = "on", -- rpc sync - worker_state_update_frequency = 1, - })) - end) - - lazy_teardown(function() - helpers.stop_kong("servroot2") - helpers.stop_kong() - end) - - after_each(function() - helpers.clean_logfile("servroot2/logs/error.log") - helpers.clean_logfile() - end) - - describe("sync works", function() - local route_id - - it("create route on CP", function() - local admin_client = helpers.admin_client(10000) - finally(function() - admin_client:close() - end) - - local res = assert(admin_client:post("/services", { - body = { name = "service-001", url = "https://127.0.0.1:15556/request", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - res = assert(admin_client:post("/services/service-001/routes", { - body = { paths = { "/001" }, }, - headers = {["Content-Type"] = "application/json"} - })) - local body = assert.res_status(201, res) - local json = cjson.decode(body) - - route_id = json.id - - test_url("/001", 9002, 200) - - assert.logfile().has.line("[kong.sync.v2] config push (connected client)", true) - assert.logfile().has.no.line("unable to update clustering data plane status", true) - - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] update entity", true) - - -- dp lua-resty-events should work without privileged_agent - assert.logfile("servroot2/logs/error.log").has.line( - "lua-resty-events enable_privileged_agent is false", true) - end) - - it("update route on CP", function() - local admin_client = 
helpers.admin_client(10000) - finally(function() - admin_client:close() - end) - - local res = assert(admin_client:post("/services", { - body = { name = "service-002", url = "https://127.0.0.1:15556/request", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - res = assert(admin_client:post("/services/service-002/routes", { - body = { paths = { "/002-foo" }, }, - headers = {["Content-Type"] = "application/json"} - })) - local body = assert.res_status(201, res) - local json = cjson.decode(body) - - route_id = json.id - - test_url("/002-foo", 9002, 200) - - res = assert(admin_client:put("/services/service-002/routes/" .. route_id, { - body = { paths = { "/002-bar" }, }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(200, res) - - test_url("/002-bar", 9002, 200) - - assert.logfile().has.line("[kong.sync.v2] config push (connected client)", true) - assert.logfile().has.no.line("unable to update clustering data plane status", true) - - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] update entity", true) - end) - - it("delete route on CP", function() - local admin_client = helpers.admin_client(10000) - finally(function() - admin_client:close() - end) - - local res = assert(admin_client:post("/services", { - body = { name = "service-003", url = "https://127.0.0.1:15556/request", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - res = assert(admin_client:post("/services/service-003/routes", { - body = { paths = { "/003-foo" }, }, - headers = {["Content-Type"] = "application/json"} - })) - local body = assert.res_status(201, res) - local json = cjson.decode(body) - - route_id = json.id - - test_url("/003-foo", 9002, 200) - - assert.logfile().has.line("[kong.sync.v2] config push (connected client)", true) - assert.logfile().has.no.line("unable to update clustering data plane status", true) - - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] update entity", true) - assert.logfile("servroot2/logs/error.log").has.no.line("[kong.sync.v2] delete entity", true) - - res = assert(admin_client:delete("/services/service-003/routes/" .. 
route_id)) - assert.res_status(204, res) - - test_url("/003-foo", 9002, 404) - - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] delete entity", true) - end) - - it("update route on CP with name", function() - local admin_client = helpers.admin_client(10000) - finally(function() - admin_client:close() - end) - - local res = assert(admin_client:post("/services", { - body = { name = "service-004", url = "https://127.0.0.1:15556/request", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - res = assert(admin_client:post("/services/service-004/routes", { - body = { name = "route-004", paths = { "/004-foo" }, }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - test_url("/004-foo", 9002, 200) - - res = assert(admin_client:put("/services/service-004/routes/route-004", { - body = { paths = { "/004-bar" }, }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(200, res) - - test_url("/004-bar", 9002, 200) - - assert.logfile().has.line("[kong.sync.v2] config push (connected client)", true) - assert.logfile().has.no.line("unable to update clustering data plane status", true) - - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] update entity", true) - end) - - it("delete route on CP with name", function() - local admin_client = helpers.admin_client(10000) - finally(function() - admin_client:close() - end) - - local res = assert(admin_client:post("/services", { - body = { name = "service-005", url = "https://127.0.0.1:15556/request", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - res = assert(admin_client:post("/services/service-005/routes", { - body = { name = "route-005", paths = { "/005-foo" }, }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - test_url("/005-foo", 9002, 200) - - assert.logfile().has.line("[kong.sync.v2] config push (connected client)", true) - assert.logfile().has.no.line("unable to update clustering data plane status", true) - - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] update entity", true) - assert.logfile("servroot2/logs/error.log").has.no.line("[kong.sync.v2] delete entity", true) - - res = assert(admin_client:delete("/services/service-005/routes/route-005")) - assert.res_status(204, res) - - test_url("/005-foo", 9002, 404) - - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] delete entity", true) - end) - - it("cascade delete on CP", function() - local admin_client = helpers.admin_client(10000) - finally(function() - admin_client:close() - end) - - -- create service and route - - local res = assert(admin_client:post("/services", { - body = { name = "service-006", url = "https://127.0.0.1:15556/request", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - res = assert(admin_client:post("/services/service-006/routes", { - body = { paths = { "/006-foo" }, }, - headers = {["Content-Type"] = "application/json"} - })) - local body = assert.res_status(201, res) - local json = cjson.decode(body) - - route_id = json.id - - test_url("/006-foo", 9002, 200) - - assert.logfile().has.line("[kong.sync.v2] config push (connected client)", true) - assert.logfile().has.no.line("unable to update clustering data plane status", true) - - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] update entity", true) - - -- create consumer and key-auth - - res = 
assert(admin_client:post("/consumers", { - body = { username = "foo", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - res = assert(admin_client:post("/consumers/foo/key-auth", { - body = { key = "my-key", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - res = assert(admin_client:post("/plugins", { - body = { name = "key-auth", - config = { key_names = {"apikey"} }, - route = { id = route_id }, - }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - test_url("/006-foo", 9002, 200, {["apikey"] = "my-key"}) - - assert.logfile().has.no.line("[kong.sync.v2] new delta due to cascade deleting", true) - assert.logfile("servroot2/logs/error.log").has.no.line("[kong.sync.v2] delete entity", true) - - -- delete consumer and key-auth - - res = assert(admin_client:delete("/consumers/foo")) - assert.res_status(204, res) - - test_url("/006-foo", 9002, 401, {["apikey"] = "my-key"}) - - assert.logfile().has.line("[kong.sync.v2] new delta due to cascade deleting", true) - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] delete entity", true) - - -- cascade deletion should be the same version - - local ver - local count = 0 - local patt = "delete entity, version: %d+" - local f = io.open("servroot2/logs/error.log", "r") - while true do - local line = f:read("*l") - - if not line then - f:close() - break - end - - local found = line:match(patt) - if found then - ver = ver or found - assert.equal(ver, found) - count = count + 1 - end - end - assert(count > 1) - - end) - end) - -end) - -end -- for _, strategy diff --git a/spec/02-integration/19-incrmental_sync/02-multiple_dp_nodes_spec.lua b/spec/02-integration/19-incrmental_sync/02-multiple_dp_nodes_spec.lua deleted file mode 100644 index fe7f89432a5a..000000000000 --- a/spec/02-integration/19-incrmental_sync/02-multiple_dp_nodes_spec.lua +++ /dev/null @@ -1,113 +0,0 @@ -local helpers = require "spec.helpers" -local cjson = require("cjson.safe") - -local function start_cp(strategy, port) - assert(helpers.start_kong({ - role = "control_plane", - cluster_cert = "spec/fixtures/kong_clustering.crt", - cluster_cert_key = "spec/fixtures/kong_clustering.key", - database = strategy, - cluster_listen = "127.0.0.1:" .. port, - nginx_conf = "spec/fixtures/custom_nginx.template", - cluster_rpc = "on", - cluster_rpc_sync = "on", -- rpc sync - })) -end - -local function start_dp(prefix, port) - assert(helpers.start_kong({ - role = "data_plane", - database = "off", - prefix = prefix, - cluster_cert = "spec/fixtures/kong_clustering.crt", - cluster_cert_key = "spec/fixtures/kong_clustering.key", - cluster_control_plane = "127.0.0.1:9005", - proxy_listen = "0.0.0.0:" .. port, - nginx_conf = "spec/fixtures/custom_nginx.template", - nginx_worker_processes = 4, -- multiple workers - cluster_rpc = "on", - cluster_rpc_sync = "on", -- rpc sync - worker_state_update_frequency = 1, - })) -end - -local function test_url(path, port, code) - helpers.wait_until(function() - local proxy_client = helpers.http_client("127.0.0.1", port) - - local res = proxy_client:send({ - method = "GET", - path = path, - }) - - local status = res and res.status - proxy_client:close() - if status == code then - return true - end - end, 10) -end - -for _, strategy in helpers.each_strategy() do - -describe("Incremental Sync RPC #" .. 
strategy, function() - - lazy_setup(function() - helpers.get_db_utils(strategy, { - "clustering_data_planes", - }) -- runs migrations - - start_cp(strategy, 9005) - start_dp("servroot2", 9002) - start_dp("servroot3", 9003) - end) - - lazy_teardown(function() - helpers.stop_kong("servroot2") - helpers.stop_kong("servroot3") - helpers.stop_kong() - end) - - describe("sync works with multiple DP nodes", function() - - it("adding/removing routes", function() - local admin_client = helpers.admin_client(10000) - finally(function() - admin_client:close() - end) - - local res = assert(admin_client:post("/services", { - body = { name = "service-001", url = "https://127.0.0.1:15556/request", }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - - -- add a route - - res = assert(admin_client:post("/services/service-001/routes", { - body = { paths = { "/001" }, }, - headers = {["Content-Type"] = "application/json"} - })) - assert.res_status(201, res) - local body = assert.res_status(201, res) - local json = cjson.decode(body) - local route_id = json.id - - test_url("/001", 9002, 200) - assert.logfile("servroot2/logs/error.log").has.line("[kong.sync.v2] update entity", true) - - test_url("/001", 9003, 200) - assert.logfile("servroot3/logs/error.log").has.line("[kong.sync.v2] update entity", true) - - -- remove a route - - res = assert(admin_client:delete("/services/service-001/routes/" .. route_id)) - assert.res_status(204, res) - - test_url("/001", 9002, 404) - test_url("/001", 9003, 404) - end) - end) -end) - -end -- for _, strategy diff --git a/spec/05-migration/db/migrations/core/024_380_to_390_spec.lua b/spec/05-migration/db/migrations/core/024_380_to_390_spec.lua index cf0f04513c68..e4b28fbce9a3 100644 --- a/spec/05-migration/db/migrations/core/024_380_to_390_spec.lua +++ b/spec/05-migration/db/migrations/core/024_380_to_390_spec.lua @@ -5,13 +5,4 @@ describe("database migration", function() assert.database_has_relation("clustering_sync_version") assert.table_has_column("clustering_sync_version", "version", "integer") end) - - uh.old_after_up("has created the \"clustering_sync_delta\" table", function() - assert.database_has_relation("clustering_sync_delta") - assert.table_has_column("clustering_sync_delta", "version", "integer") - assert.table_has_column("clustering_sync_delta", "type", "text") - assert.table_has_column("clustering_sync_delta", "pk", "json") - assert.table_has_column("clustering_sync_delta", "ws_id", "uuid") - assert.table_has_column("clustering_sync_delta", "entity", "json") - end) end) diff --git a/spec/05-migration/db/migrations/core/025_390_to_3100_spec.lua b/spec/05-migration/db/migrations/core/025_390_to_3100_spec.lua new file mode 100644 index 000000000000..32c8563aa0f1 --- /dev/null +++ b/spec/05-migration/db/migrations/core/025_390_to_3100_spec.lua @@ -0,0 +1,7 @@ +local uh = require "spec/upgrade_helpers" + +describe("database migration", function() + uh.old_after_up("does not have \"clustering_sync_delta\" table", function() + assert.not_database_has_relation("clustering_sync_delta") + end) +end) From 145816122ec8066c03502a794617721dc8058b15 Mon Sep 17 00:00:00 2001 From: Xiaochen Wang Date: Mon, 23 Dec 2024 14:20:21 +0800 Subject: [PATCH 16/18] perf(core): reduce LMDB size by optimizing key format (#14028) 1. redesign the key format for LMDB 2. remove the key with * workspace 3. 
search all the workspaces if the caller invokes the `select()` API without
   providing a specific workspace id

KAG-5704
---
 .../perf-lmdb-remove-global-query-key.yml  |  3 +
 kong/db/declarative/import.lua             | 64 +++++++++++--------
 kong/db/declarative/init.lua               |  1 +
 kong/db/strategies/off/init.lua            | 36 +++++++----
 spec/01-unit/01-db/10-declarative_spec.lua |  4 +-
 .../01-db/11-declarative_lmdb_spec.lua     | 13 ++--
 6 files changed, 77 insertions(+), 44 deletions(-)
 create mode 100644 changelog/unreleased/kong/perf-lmdb-remove-global-query-key.yml

diff --git a/changelog/unreleased/kong/perf-lmdb-remove-global-query-key.yml b/changelog/unreleased/kong/perf-lmdb-remove-global-query-key.yml
new file mode 100644
index 000000000000..831f6f1776c8
--- /dev/null
+++ b/changelog/unreleased/kong/perf-lmdb-remove-global-query-key.yml
@@ -0,0 +1,3 @@
+message: "Reduced the LMDB storage space by optimizing the key format."
+type: performance
+scope: Core

diff --git a/kong/db/declarative/import.lua b/kong/db/declarative/import.lua
index 2030da85359a..1b2bfc727132 100644
--- a/kong/db/declarative/import.lua
+++ b/kong/db/declarative/import.lua
@@ -70,6 +70,7 @@ local function workspace_id(schema, options)
     return get_workspace_id()
   end

+  -- global query, like routes:each(page_size, GLOBAL_QUERY_OPTS)
   if options.workspace == null then
     return GLOBAL_WORKSPACE_TAG
   end
@@ -243,26 +244,39 @@ local function find_ws(entities, name)
 end

+-- unique key
 local function unique_field_key(schema_name, ws_id, field, value)
-  return string_format("%s|%s|%s|%s", schema_name, ws_id, field, sha256_hex(value))
+  return string_format("U|%s|%s|%s|%s", schema_name, field, ws_id, sha256_hex(value))
 end

+-- foreign key
 local function foreign_field_key_prefix(schema_name, ws_id, field, foreign_id)
-  return string_format("%s|%s|%s|%s|", schema_name, ws_id, field, foreign_id)
+  if ws_id == GLOBAL_WORKSPACE_TAG then
+    return string_format("F|%s|%s|%s|", schema_name, field, foreign_id)
+  end
+
+  return string_format("F|%s|%s|%s|%s|", schema_name, field, foreign_id, ws_id)
 end

 local function foreign_field_key(schema_name, ws_id, field, foreign_id, pk)
+  assert(ws_id ~= GLOBAL_WORKSPACE_TAG)
   return foreign_field_key_prefix(schema_name, ws_id, field, foreign_id) .. pk
 end

+-- item key
 local function item_key_prefix(schema_name, ws_id)
-  return string_format("%s|%s|*|", schema_name, ws_id)
+  if ws_id == GLOBAL_WORKSPACE_TAG then
+    return string_format("I|%s|", schema_name)
+  end
+
+  return string_format("I|%s|%s|", schema_name, ws_id)
 end

 local function item_key(schema_name, ws_id, pk_str)
+  assert(ws_id ~= GLOBAL_WORKSPACE_TAG)
   return item_key_prefix(schema_name, ws_id) .. pk_str
 end

@@ -307,10 +321,6 @@ local function _set_entity_for_txn(t, entity_name, item, options, is_delete)
   -- store serialized entity into lmdb
   t:set(itm_key, itm_value)

-  -- for global query
-  local global_key = item_key(entity_name, GLOBAL_WORKSPACE_TAG, pk)
-  t:set(global_key, idx_value)
-
   -- select_by_cache_key
   if schema.cache_key then
     local cache_key = dao:cache_key(item)
@@ -347,12 +357,9 @@
         value_str = pk_string(kong.db[fdata_reference].schema, value)
       end

-      for _, wid in ipairs {field_ws_id, GLOBAL_WORKSPACE_TAG} do
-        local key = unique_field_key(entity_name, wid, fname, value_str or value)
-
-        -- store item_key or nil into lmdb
-        t:set(key, idx_value)
-      end
+      local key = unique_field_key(entity_name, field_ws_id, fname, value_str or value)
+      -- store item_key or nil into lmdb
+      t:set(key, idx_value)
     end

     if is_foreign then
@@ -361,12 +368,9 @@
       value_str = pk_string(kong.db[fdata_reference].schema, value)

-      for _, wid in ipairs {field_ws_id, GLOBAL_WORKSPACE_TAG} do
-        local key = foreign_field_key(entity_name, wid, fname, value_str, pk)
-
-        -- store item_key or nil into lmdb
-        t:set(key, idx_value)
-      end
+      local key = foreign_field_key(entity_name, field_ws_id, fname, value_str, pk)
+      -- store item_key or nil into lmdb
+      t:set(key, idx_value)
     end

     ::continue::
@@ -380,18 +384,22 @@ end
 -- the provided LMDB txn object, this operation is only safe
 -- if the entity does not already exist inside the LMDB database
 --
--- The actual item key is: <entity_name>|<ws_id>|*|<pk_string>
+-- The actual item key is: I|<entity_name>|<ws_id>|<pk_string>
 --
--- This function sets the following:
+-- This function sets the following key-value pairs:
 --
--- * <entity_name>|<ws_id>|*|<pk_string> => serialized item
--- * <entity_name>|*|*|<pk_string> => actual item key
+-- key: I|<entity_name>|<ws_id>|<pk_string>
+-- value: serialized item
 --
--- * <entity_name>|<ws_id>|<field_name>|sha256(<field_value>) => actual item key
--- * <entity_name>|*|<field_name>|sha256(<field_value>) => actual item key
+-- key: U|<entity_name>|<field_name>|<ws_id>|sha256(<field_value>)
+-- value: actual item key
 --
--- * <entity_name>|<ws_id>|<field_name>|<foreign_id>|<pk_string> => actual item key
--- * <entity_name>|*|<field_name>|<foreign_id>|<pk_string> => actual item key
+-- key: F|<entity_name>|<field_name>|<foreign_id>|<ws_id>|<pk_string>
+-- value: actual item key
+--
+-- The format of the key string follows the sequence of the construction order:
+-- `item type > entity name > specific item info > workspace id > item uuid`
+-- This order makes it easier to query all entities using the API lmdb_prefix.page().
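+--
+-- A purely illustrative sketch (the names and UUIDs below are made up for
+-- this note, not taken from the patch): a service named "example" in
+-- workspace <ws_uuid>, with a route <route_uuid> pointing at it, would
+-- produce key-value pairs shaped like:
+--
+--   I|services|<ws_uuid>|<service_uuid>                     => <serialized service>
+--   U|services|name|<ws_uuid>|sha256("example")             => I|services|<ws_uuid>|<service_uuid>
+--   F|routes|service|<service_uuid>|<ws_uuid>|<route_uuid>  => I|routes|<ws_uuid>|<route_uuid>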
-- -- DO NOT touch `item`, or else the entity will be changed local function insert_entity_for_txn(t, entity_name, item, options) @@ -608,4 +616,6 @@ return { load_into_cache_with_events = load_into_cache_with_events, insert_entity_for_txn = insert_entity_for_txn, delete_entity_for_txn = delete_entity_for_txn, + + GLOBAL_WORKSPACE_TAG = GLOBAL_WORKSPACE_TAG, } diff --git a/kong/db/declarative/init.lua b/kong/db/declarative/init.lua index 73a2704f51e9..1f209657f0e1 100644 --- a/kong/db/declarative/init.lua +++ b/kong/db/declarative/init.lua @@ -261,6 +261,7 @@ _M.load_into_cache_with_events = declarative_import.load_into_cache_with_events _M.insert_entity_for_txn = declarative_import.insert_entity_for_txn _M.delete_entity_for_txn = declarative_import.delete_entity_for_txn _M.workspace_id = declarative_import.workspace_id +_M.GLOBAL_WORKSPACE_TAG = declarative_import.GLOBAL_WORKSPACE_TAG return _M diff --git a/kong/db/strategies/off/init.lua b/kong/db/strategies/off/init.lua index 1fab71dac502..d61e98b5c529 100644 --- a/kong/db/strategies/off/init.lua +++ b/kong/db/strategies/off/init.lua @@ -20,6 +20,7 @@ local item_key = declarative.item_key local item_key_prefix = declarative.item_key_prefix local workspace_id = declarative.workspace_id local foreign_field_key_prefix = declarative.foreign_field_key_prefix +local GLOBAL_WORKSPACE_TAG = declarative.GLOBAL_WORKSPACE_TAG local PROCESS_AUTO_FIELDS_OPTS = { @@ -38,11 +39,6 @@ _mt.__index = _mt local UNINIT_WORKSPACE_ID = "00000000-0000-0000-0000-000000000000" -local function need_follow(ws_id) - return ws_id == "*" -end - - local function get_default_workspace() if kong.default_workspace == UNINIT_WORKSPACE_ID then local res = kong.db.workspaces:select_by_name("default") @@ -221,11 +217,11 @@ end -- ws_id here. 
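-- As an aside, a minimal illustration of the two prefix shapes produced by
-- item_key_prefix() above (the workspace uuid is hypothetical, not taken
-- from this patch):
--
--   item_key_prefix("routes", "*")      --> "I|routes|"                 -- spans every workspace
--   item_key_prefix("routes", ws_uuid)  --> "I|routes|" .. ws_uuid .. "|"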
 local function page_for_tags(self, size, offset, options)
   -- /:entity?tags=:tags
   -- search all key-values: I|<entity_name>|*|<pk_string> => actual item key
   if self.schema.name ~= "tags" then
     local prefix = item_key_prefix(self.schema.name, "*") -- "I|<entity_name>|"
     local items, err, offset = page_for_prefix(self, prefix, size, offset,
-                                               options, true)
+                                               options)
     if not items then
       return nil, err
     end
@@ -279,7 +275,7 @@
     local rows, err
     rows, err, offset_token = page_for_prefix(self, prefix, size, offset_token,
-                                              options, true, dao.schema)
+                                              options, false, dao.schema)
     if not rows then
       return nil, err
     end
@@ -324,7 +320,7 @@ local function page(self, size, offset, options)
     return page_for_tags(self, size, offset, options)
   end

-  return page_for_prefix(self, prefix, size, offset, options, need_follow(ws_id))
+  return page_for_prefix(self, prefix, size, offset, options)
 end

@@ -333,8 +329,26 @@ local function select(self, pk, options)
   local schema = self.schema
   local ws_id = workspace_id(schema, options)
   local pk = pk_string(schema, pk)
+
+  -- if no specific ws_id is provided, we need to search all workspace ids
+  if ws_id == GLOBAL_WORKSPACE_TAG then
+    for workspace, err in kong.db.workspaces:each() do
+      if err then
+        return nil, err
+      end
+
+      local key = item_key(schema.name, workspace.id, pk)
+      local entity = select_by_key(schema, key)
+      if entity then
+        return entity
+      end
+    end
+
+    return nil, "not found"
+  end
+
   local key = item_key(schema.name, ws_id, pk)
-  return select_by_key(schema, key, need_follow(ws_id))
+  return select_by_key(schema, key)
 end

diff --git a/spec/01-unit/01-db/10-declarative_spec.lua b/spec/01-unit/01-db/10-declarative_spec.lua
index 137bebb206e4..a383daaeaf36 100644
--- a/spec/01-unit/01-db/10-declarative_spec.lua
+++ b/spec/01-unit/01-db/10-declarative_spec.lua
@@ -53,14 +53,14 @@ keyauth_credentials:
   it("utilizes the schema name, workspace id, field name, and checksum of the field value", function()
     local key = unique_field_key("services", "123", "fieldname", "test", false)
     assert.is_string(key)
-    assert.equals("services|123|fieldname|" .. sha256_hex("test"), key)
+    assert.equals("U|services|fieldname|123|" .. sha256_hex("test"), key)
   end)

   -- since rpc sync, the param `unique_across_ws` is useless
   -- this test case is just for compatibility
   it("does not omit the workspace id when 'unique_across_ws' is 'true'", function()
     local key = unique_field_key("services", "123", "fieldname", "test", true)
-    assert.equals("services|123|fieldname|" .. sha256_hex("test"), key)
+    assert.equals("U|services|fieldname|123|" .. sha256_hex("test"), key)
   end)
 end)

diff --git a/spec/01-unit/01-db/11-declarative_lmdb_spec.lua b/spec/01-unit/01-db/11-declarative_lmdb_spec.lua
index 047fadae604a..7de064ffe5e0 100644
--- a/spec/01-unit/01-db/11-declarative_lmdb_spec.lua
+++ b/spec/01-unit/01-db/11-declarative_lmdb_spec.lua
@@ -203,10 +203,12 @@ describe("#off preserve nulls", function()
     local id, item = next(entities.basicauth_credentials)

     -- format changed after rpc sync
+    -- item key
     local cache_key = concat({
+      "I|",
       "basicauth_credentials|",
       item.ws_id,
-      "|*|",
+      "|",
       id
     })
@@ -226,13 +228,16 @@
     if plugin.name == PLUGIN_NAME then

       -- format changed after rpc sync
+      -- foreign key:
       cache_key = concat({
+        "F|",
         "plugins|",
-        plugin.ws_id,
-        "|route|",
+        "route|",
         plugin.route.id,
         "|",
-        plugin.id
+        plugin.ws_id,
+        "|",
+        plugin.id,
       })
       value, err, hit_lvl = lmdb.get(cache_key)
       assert.is_nil(err)

From 51cb5f1b4f498f21c7bb1707b6890f5b647060fd Mon Sep 17 00:00:00 2001
From: hanjian
Date: Mon, 23 Dec 2024 14:23:26 +0800
Subject: [PATCH 17/18] tests(clustering): dp status ready when using RPC sync
 (#14035)

KAG-5994

---------

Co-authored-by: Xiaochen Wang
---
 .../09-hybrid_mode/11-status_spec.lua | 58 +++++++++++++++++--
 1 file changed, 52 insertions(+), 6 deletions(-)

diff --git a/spec/02-integration/09-hybrid_mode/11-status_spec.lua b/spec/02-integration/09-hybrid_mode/11-status_spec.lua
index 86cd89418191..7db58b877772 100644
--- a/spec/02-integration/09-hybrid_mode/11-status_spec.lua
+++ b/spec/02-integration/09-hybrid_mode/11-status_spec.lua
@@ -74,9 +74,6 @@ for _, strategy in helpers.each_strategy() do
   end)

   describe("dp status ready endpoint for no config", function()
-    -- XXX FIXME
-    local skip_rpc_sync = rpc_sync == "on" and pending or it
-
     lazy_setup(function()
       assert(start_kong_cp())
       assert(start_kong_dp())
@@ -107,8 +104,8 @@ for _, strategy in helpers.each_strategy() do
     end)

     -- now the dp receives config from the cp, so it should be ready
-
-    skip_rpc_sync("should return 200 on data plane after configuring", function()
+    local it_rpc_sync_off = rpc_sync == "off" and it or pending
+    it_rpc_sync_off("should return 200 on data plane after configuring", function()
       helpers.wait_until(function()
         local http_client = helpers.http_client('127.0.0.1', dp_status_port)
@@ -160,10 +157,59 @@ for _, strategy in helpers.each_strategy() do
           return true
         end
       end, 10)
-
     end)
   end)

+  local describe_rpc_sync_on = rpc == "on" and rpc_sync == "on" and describe or pending
+  describe_rpc_sync_on("dp status ready when rpc_sync == on", function()
+    lazy_setup(function()
+      assert(start_kong_cp())
+      assert(start_kong_dp())
+    end)
+
+    lazy_teardown(function()
+      assert(helpers.stop_kong("serve_cp"))
+      assert(helpers.stop_kong("serve_dp"))
+    end)
+
+    it("should return 200 on data plane after configuring when rpc_sync == on", function()
+      -- insert one entity to make dp ready for incremental sync
+
+      local http_client = helpers.http_client('127.0.0.1', dp_status_port)
+
+      local res = http_client:send({
+        method = "GET",
+        path = "/status/ready",
+      })
+      http_client:close()
+      assert.equal(503, res.status)
+
+      local admin_client = helpers.admin_client(10000)
+      local res = assert(admin_client:post("/services", {
+        body = { name = "service-001", url = "https://127.0.0.1:15556/request", },
+        headers = {["Content-Type"] = "application/json"}
+      }))
+      assert.res_status(201, res)
+
+      admin_client:close()
+
+      helpers.wait_until(function()
+        local http_client = helpers.http_client('127.0.0.1', dp_status_port)
+
+        local res =
http_client:send({ + method = "GET", + path = "/status/ready", + }) + + local status = res and res.status + http_client:close() + + if status == 200 then + return true + end + end, 10) + end) + end) end) end -- for _, strategy end -- for rpc_sync From dc1a42dde90b3834bb8051d52d68c45c9a45ac92 Mon Sep 17 00:00:00 2001 From: BrianChen Date: Mon, 23 Dec 2024 17:05:34 +0800 Subject: [PATCH 18/18] close socket connection when websocket connect failed to avoid potential leak (#14038) * close socket connection when websocket connect failed to avoid potential leak * add changelog * fix changelog --- .../unreleased/kong/fix-potential-socket-connection-leak.yml | 3 +++ kong/clustering/rpc/manager.lua | 1 + kong/clustering/utils.lua | 1 + 3 files changed, 5 insertions(+) create mode 100644 changelog/unreleased/kong/fix-potential-socket-connection-leak.yml diff --git a/changelog/unreleased/kong/fix-potential-socket-connection-leak.yml b/changelog/unreleased/kong/fix-potential-socket-connection-leak.yml new file mode 100644 index 000000000000..218bcca26a5f --- /dev/null +++ b/changelog/unreleased/kong/fix-potential-socket-connection-leak.yml @@ -0,0 +1,3 @@ +message: "Fix potential socket connection leak when websocket client connection fails" +type: bugfix +scope: Core diff --git a/kong/clustering/rpc/manager.lua b/kong/clustering/rpc/manager.lua index eefb1aabb6c5..c90bbb56b69b 100644 --- a/kong/clustering/rpc/manager.lua +++ b/kong/clustering/rpc/manager.lua @@ -569,6 +569,7 @@ function _M:connect(premature, node_id, host, path, cert, key) ::err:: if not exiting() then + c:close() self:try_connect(reconnection_delay) end end diff --git a/kong/clustering/utils.lua b/kong/clustering/utils.lua index f05be2353fbc..5959f8f0a837 100644 --- a/kong/clustering/utils.lua +++ b/kong/clustering/utils.lua @@ -106,6 +106,7 @@ function _M.connect_cp(dp, endpoint, protocols) local ok, err = c:connect(uri, opts) if not ok then + c:close() return nil, uri, err end
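A hedged sketch of the defensive pattern behind this last fix (the wrapper
function and its error handling are illustrative only, not taken from the
patch): when lua-resty-websocket's `connect()` fails, the client object still
holds the underlying TCP cosocket, so closing it explicitly frees the
connection immediately instead of leaving it to garbage collection.

local client = require("resty.websocket.client")

local function connect_ws(uri, opts)
  local c = assert(client:new())

  local ok, err = c:connect(uri, opts)
  if not ok then
    -- release the underlying TCP socket right away; without this,
    -- repeated reconnect attempts can accumulate dangling connections
    c:close()
    return nil, err
  end

  return c
end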