diff --git a/kong/plugins/prometheus/exporter.lua b/kong/plugins/prometheus/exporter.lua
index f25eeeaeb012..2a94ebac272c 100644
--- a/kong/plugins/prometheus/exporter.lua
+++ b/kong/plugins/prometheus/exporter.lua
@@ -331,19 +331,11 @@ local function log(message, serialized)
 
   if serialized.ai_metrics then
     for _, ai_plugin in pairs(serialized.ai_metrics) do
-      local cache_status
-      if ai_plugin.cache and ai_plugin.cache.cache_status then
-        cache_status = ai_plugin.cache.cache_status
-      end
-
-      local vector_db, embeddings_provider, embeddings_model
-      if ai_plugin.cache then
-        vector_db = ai_plugin.cache.vector_db
-
-        embeddings_provider = ai_plugin.cache.embeddings_provider
-
-        embeddings_model = ai_plugin.cache.embeddings_model
-      end
+      -- Default to an empty table so entries without cache stats cannot
+      -- trigger a nil-index error; empty-string labels keep Prometheus happy.
+      local cache = ai_plugin.cache or {}
+      local cache_status = cache.cache_status or ""
+      local vector_db = cache.vector_db or ""
+      local embeddings_provider = cache.embeddings_provider or ""
+      local embeddings_model = cache.embeddings_model or ""
 
       labels_table_ai_llm_status[1] = ai_plugin.meta.provider_name
       labels_table_ai_llm_status[2] = ai_plugin.meta.request_model
diff --git a/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua b/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
index 933d8f6450ae..37f7d8ac9c1b 100644
--- a/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
+++ b/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
@@ -46,10 +46,10 @@ local _EXPECTED_CHAT_STATS = {
       response_model = 'gpt-3.5-turbo-0613',
     },
     usage = {
-      completion_tokens = 12,
-      cost = 0.00037,
       prompt_tokens = 25,
+      completion_tokens = 12,
       total_tokens = 37,
+      cost = 0.00037,
     },
     cache = {}
   },
diff --git a/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua b/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua
index 5ec5f94c5c18..0e8014dc5fee 100644
--- a/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua
+++ b/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua
@@ -126,10 +126,10 @@ local _EXPECTED_CHAT_STATS = {
       response_model = 'gpt-3.5-turbo-0613',
     },
     usage = {
-      completion_tokens = 12,
-      cost = 0.00037,
       prompt_tokens = 25,
+      completion_tokens = 12,
       total_tokens = 37,
+      cost = 0.00037,
     },
     cache = {}
   },
diff --git a/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua b/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua
index 395897a9eebe..34f5afab3b6c 100644
--- a/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua
+++ b/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua
@@ -183,10 +183,10 @@ local _EXPECTED_CHAT_STATS = {
       response_model = 'gpt-3.5-turbo-0613',
     },
     usage = {
-      completion_tokens = 12,
-      cost = 0.00037,
       prompt_tokens = 25,
+      completion_tokens = 12,
       total_tokens = 37,
+      cost = 0.00037,
     },
     cache = {}
   },