chore(*): remove empty line with spaces
fffonion committed Aug 7, 2024
1 parent 0bc0c99 commit 97e1ab3
Showing 21 changed files with 158 additions and 158 deletions.
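Every hunk below makes the same mechanical change: a line containing only spaces becomes a genuinely empty line. As a rough illustration (not part of this commit), the cleanup can be expressed in a few lines of Lua; the standalone script below is hypothetical:

-- Collapse any line holding only spaces/tabs into an empty line.
local function blank_out_space_only_lines(text)
  return (text:gsub("[^\n]+", function(line)
    return line:match("^[ \t]+$") and "" or line
  end))
end

-- usage: lua strip.lua <file>  (rewrites the file in place)
local path = assert(arg[1], "usage: lua strip.lua <file>")
local f = assert(io.open(path, "rb"))
local body = f:read("*a"); f:close()
f = assert(io.open(path, "wb"))
f:write(blank_out_space_only_lines(body)); f:close()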
4 changes: 2 additions & 2 deletions kong/llm/drivers/anthropic.lua
@@ -71,7 +71,7 @@ local function to_claude_prompt(req)
return kong_messages_to_claude_prompt(req.messages)

end

return nil, "request is missing .prompt and .messages commands"
end

@@ -328,7 +328,7 @@ function _M.from_format(response_string, model_info, route_type)
if not transform then
return nil, fmt("no transformer available from format %s://%s", model_info.provider, route_type)
end

local ok, response_string, err, metadata = pcall(transform, response_string, model_info, route_type)
if not ok or err then
return nil, fmt("transformation failed from type %s://%s: %s",
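The from_format hunks in this and the following drivers all share one dispatch shape: look up a transformer for the route type, run it under pcall, and surface either the pcall failure or the transformer's own error. A minimal standalone sketch of that pattern, with an invented transformer for demonstration:

local fmt = string.format

-- illustrative transformer table; the real drivers key these by route type
local transformers_from = {
  ["llm/v1/chat"] = function(response_string)
    return response_string, nil  -- a real transformer reshapes the body here
  end,
}

local function from_format(response_string, provider, route_type)
  local transform = transformers_from[route_type]
  if not transform then
    return nil, fmt("no transformer available from format %s://%s", provider, route_type)
  end
  -- pcall traps runtime errors; `result` carries the error message when ok is false
  local ok, result, err = pcall(transform, response_string)
  if not ok or err then
    return nil, fmt("transformation failed from type %s://%s: %s",
                    provider, route_type, tostring(err or result))
  end
  return result
end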
2 changes: 1 addition & 1 deletion kong/llm/drivers/azure.lua
@@ -139,7 +139,7 @@ function _M.configure_request(conf)
-- technically min supported version
query_table["api-version"] = kong.request.get_query_arg("api-version")
or (conf.model.options and conf.model.options.azure_api_version)

if auth_param_name and auth_param_value and auth_param_location == "query" then
query_table[auth_param_name] = auth_param_value
end
2 changes: 1 addition & 1 deletion kong/llm/drivers/bedrock.lua
@@ -266,7 +266,7 @@ function _M.from_format(response_string, model_info, route_type)
if not transformers_from[route_type] then
return nil, fmt("no transformer available from format %s://%s", model_info.provider, route_type)
end

local ok, response_string, err, metadata = pcall(transformers_from[route_type], response_string, model_info, route_type)
if not ok or err then
return nil, fmt("transformation failed from type %s://%s: %s",
52 changes: 26 additions & 26 deletions kong/llm/drivers/cohere.lua
@@ -21,7 +21,7 @@ local _CHAT_ROLES = {

local function handle_stream_event(event_t, model_info, route_type)
local metadata

-- discard empty frames, it should either be a random new line, or comment
if (not event_t.data) or (#event_t.data < 1) then
return
@@ -31,12 +31,12 @@ local function handle_stream_event(event_t, model_info, route_type)
if err then
return nil, "failed to decode event frame from cohere: " .. err, nil
end

local new_event

if event.event_type == "stream-start" then
kong.ctx.plugin.ai_proxy_cohere_stream_id = event.generation_id

-- ignore the rest of this one
new_event = {
choices = {
@@ -52,7 +52,7 @@
model = model_info.name,
object = "chat.completion.chunk",
}

elseif event.event_type == "text-generation" then
-- this is a token
if route_type == "stream/llm/v1/chat" then
@@ -137,19 +137,19 @@ end
local function handle_json_inference_event(request_table, model)
request_table.temperature = request_table.temperature
request_table.max_tokens = request_table.max_tokens

request_table.p = request_table.top_p
request_table.k = request_table.top_k

request_table.top_p = nil
request_table.top_k = nil

request_table.model = model.name or request_table.model
request_table.stream = request_table.stream or false -- explicitly set this

if request_table.prompt and request_table.messages then
return kong.response.exit(400, "cannot run a 'prompt' and a history of 'messages' at the same time - refer to schema")

elseif request_table.messages then
-- we have to move all BUT THE LAST message into "chat_history" array
-- and move the LAST message (from 'user') into "message" string
@@ -164,26 +164,26 @@ local function handle_json_inference_event(request_table, model)
else
role = _CHAT_ROLES.user
end

chat_history[i] = {
role = role,
message = v.content,
}
end
end

request_table.chat_history = chat_history
end

request_table.message = request_table.messages[#request_table.messages].content
request_table.messages = nil

elseif request_table.prompt then
request_table.prompt = request_table.prompt
request_table.messages = nil
request_table.message = nil
end

return request_table, "application/json", nil
end
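To make the reshaping above concrete: given an OpenAI-style body, handle_json_inference_event renames the sampling fields and splits the messages array into Cohere's chat_history plus a final message. The sample input below is invented; the expected output is sketched from the hunk above, with roles mapped through the driver's _CHAT_ROLES table:

-- hypothetical OpenAI-style input
local request_table = {
  top_p = 0.9,
  top_k = 40,
  messages = {
    { role = "system",    content = "Be terse."       },
    { role = "user",      content = "Hi"              },
    { role = "assistant", content = "Hello!"          },
    { role = "user",      content = "Summarise Kong." },
  },
}

-- roughly what the function would produce:
-- {
--   p = 0.9,                      -- renamed from top_p
--   k = 40,                       -- renamed from top_k
--   stream = false,               -- explicitly defaulted
--   chat_history = {              -- every message except the last, each as
--     ...                         -- { role = <_CHAT_ROLES.*>, message = content }
--   },
--   message = "Summarise Kong.",  -- content of the last message
-- }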

@@ -202,7 +202,7 @@ local transformers_from = {
-- messages/choices table is only 1 size, so don't need to static allocate
local messages = {}
messages.choices = {}

if response_table.prompt and response_table.generations then
-- this is a "co.generate"
for i, v in ipairs(response_table.generations) do
@@ -215,7 +215,7 @@
messages.object = "text_completion"
messages.model = model_info.name
messages.id = response_table.id

local stats = {
completion_tokens = response_table.meta
and response_table.meta.billed_units
@@ -230,10 +230,10 @@ local transformers_from = {
and (response_table.meta.billed_units.output_tokens + response_table.meta.billed_units.input_tokens),
}
messages.usage = stats

elseif response_table.text then
-- this is a "co.chat"

messages.choices[1] = {
index = 0,
message = {
Expand All @@ -245,7 +245,7 @@ local transformers_from = {
messages.object = "chat.completion"
messages.model = model_info.name
messages.id = response_table.generation_id

local stats = {
completion_tokens = response_table.meta
and response_table.meta.billed_units
@@ -260,10 +260,10 @@ local transformers_from = {
and (response_table.meta.billed_units.output_tokens + response_table.meta.billed_units.input_tokens),
}
messages.usage = stats

else -- probably a fault
return nil, "'text' or 'generations' missing from cohere response body"

end

return cjson.encode(messages)
@@ -314,17 +314,17 @@ local transformers_from = {
prompt.object = "chat.completion"
prompt.model = model_info.name
prompt.id = response_table.generation_id

local stats = {
completion_tokens = response_table.token_count and response_table.token_count.response_tokens,
prompt_tokens = response_table.token_count and response_table.token_count.prompt_tokens,
total_tokens = response_table.token_count and response_table.token_count.total_tokens,
}
prompt.usage = stats

else -- probably a fault
return nil, "'text' or 'generations' missing from cohere response body"

end

return cjson.encode(prompt)
@@ -465,7 +465,7 @@ function _M.configure_request(conf)
and ai_shared.operation_map[DRIVER_NAME][conf.route_type].path
or "/"
end

-- if the path is read from a URL capture, ensure that it is valid
parsed_url.path = string_gsub(parsed_url.path, "^/*", "/")

4 changes: 2 additions & 2 deletions kong/llm/drivers/gemini.lua
@@ -123,7 +123,7 @@ local function to_gemini_chat_openai(request_table, model_info, route_type)
}
end
end

new_r.generationConfig = to_gemini_generation_config(request_table)

return new_r, "application/json", nil
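The body of to_gemini_generation_config sits outside this hunk; as an assumption based on the public Gemini REST API, it presumably maps the OpenAI-style fields onto camelCase generationConfig keys, along these lines:

-- Hypothetical sketch; the real implementation lives elsewhere in gemini.lua.
local function to_gemini_generation_config(request_table)
  return {
    temperature     = request_table.temperature,
    topP            = request_table.top_p,
    topK            = request_table.top_k,
    maxOutputTokens = request_table.max_tokens,
  }
end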
@@ -222,7 +222,7 @@ function _M.from_format(response_string, model_info, route_type)
if not transformers_from[route_type] then
return nil, fmt("no transformer available from format %s://%s", model_info.provider, route_type)
end

local ok, response_string, err, metadata = pcall(transformers_from[route_type], response_string, model_info, route_type)
if not ok or err then
return nil, fmt("transformation failed from type %s://%s: %s",
2 changes: 1 addition & 1 deletion kong/llm/drivers/llama2.lua
@@ -113,7 +113,7 @@ local function to_raw(request_table, model)
messages.parameters.top_k = request_table.top_k
messages.parameters.temperature = request_table.temperature
messages.parameters.stream = request_table.stream or false -- explicitly set this

if request_table.prompt and request_table.messages then
return kong.response.exit(400, "cannot run raw 'prompt' and chat history 'messages' requests at the same time - refer to schema")

4 changes: 2 additions & 2 deletions kong/llm/drivers/openai.lua
@@ -72,7 +72,7 @@ function _M.from_format(response_string, model_info, route_type)
if not transformers_from[route_type] then
return nil, fmt("no transformer available from format %s://%s", model_info.provider, route_type)
end

local ok, response_string, err = pcall(transformers_from[route_type], response_string, model_info)
if not ok or err then
return nil, fmt("transformation failed from type %s://%s: %s",
@@ -203,7 +203,7 @@ function _M.configure_request(conf)
parsed_url = socket_url.parse(ai_shared.upstream_url_format[DRIVER_NAME])
parsed_url.path = path
end

-- if the path is read from a URL capture, ensure that it is valid
parsed_url.path = string_gsub(parsed_url.path, "^/*", "/")

4 changes: 2 additions & 2 deletions kong/llm/drivers/shared.lua
@@ -259,7 +259,7 @@ function _M.frame_to_events(frame, provider)

-- it may start with ',' which is the start of the new frame
frame = (string.sub(str_ltrim(frame), 1, 1) == "," and string.sub(str_ltrim(frame), 2)) or frame

-- it may end with the array terminator ']' indicating the finished stream
if string.sub(str_rtrim(frame), -1) == "]" then
frame = string.sub(str_rtrim(frame), 1, -2)
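These two trims handle providers that stream one JSON array rather than discrete SSE events: a frame may carry the array's ',' separator at its head or the closing ']' at its tail. A standalone sketch of the same normalization, with the trim helpers re-implemented locally (in shared.lua they are existing utilities):

-- Sketch only; mirrors the trimming logic in the hunk above.
local function str_ltrim(s) return (s:gsub("^%s+", "")) end
local function str_rtrim(s) return (s:gsub("%s+$", "")) end

local function normalize_frame(frame)
  -- drop a leading ',' left over from the enclosing JSON array
  if string.sub(str_ltrim(frame), 1, 1) == "," then
    frame = string.sub(str_ltrim(frame), 2)
  end
  -- drop the ']' that terminates the finished stream
  if string.sub(str_rtrim(frame), -1) == "]" then
    frame = string.sub(str_rtrim(frame), 1, -2)
  end
  return frame
end

print(normalize_frame(',{"text":"hi"}'))  --> {"text":"hi"}
print(normalize_frame('{"done":true}]'))  --> {"done":true}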
@@ -446,7 +446,7 @@ function _M.from_ollama(response_string, model_info, route_type)

end
end

if output and output ~= _M._CONST.SSE_TERMINATOR then
output, err = cjson.encode(output)
end
16 changes: 8 additions & 8 deletions spec/02-integration/22-ai_plugins/01-reports_spec.lua
@@ -38,32 +38,32 @@ for _, strategy in helpers.each_strategy() do
local fixtures = {
http_mock = {},
}

fixtures.http_mock.openai = [[
server {
server_name openai;
listen ]]..MOCK_PORT..[[;
default_type 'application/json';
location = "/llm/v1/chat/good" {
content_by_lua_block {
local pl_file = require "pl.file"
local json = require("cjson.safe")
ngx.req.read_body()
local body, err = ngx.req.get_body_data()
body, err = json.decode(body)
local token = ngx.req.get_headers()["authorization"]
local token_query = ngx.req.get_uri_args()["apikey"]
if token == "Bearer openai-key" or token_query == "openai-key" or body.apikey == "openai-key" then
ngx.req.read_body()
local body, err = ngx.req.get_body_data()
body, err = json.decode(body)
if err or (body.messages == ngx.null) then
ngx.status = 400
ngx.print(pl_file.read("spec/fixtures/ai-proxy/openai/llm-v1-chat/responses/bad_request.json"))
12 changes: 6 additions & 6 deletions spec/03-plugins/38-ai-proxy/00-config_spec.lua
@@ -84,7 +84,7 @@ describe(PLUGIN_NAME .. ": (schema)", function()
end

local ok, err = validate(config)

assert.is_truthy(ok)
assert.is_falsy(err)
end)
@@ -220,7 +220,7 @@ describe(PLUGIN_NAME .. ": (schema)", function()
}

local ok, err = validate(config)

assert.equal(err["config"]["@entity"][1], "must set one of 'auth.header_name', 'auth.param_name', "
.. "and its respective options, when provider is not self-hosted")
assert.is_falsy(ok)
@@ -244,7 +244,7 @@ describe(PLUGIN_NAME .. ": (schema)", function()
}

local ok, err = validate(config)

assert.equals(err["config"]["@entity"][1], "all or none of these fields must be set: 'auth.header_name', 'auth.header_value'")
assert.is_falsy(ok)
end)
@@ -268,7 +268,7 @@ describe(PLUGIN_NAME .. ": (schema)", function()
}

local ok, err = validate(config)

assert.is_falsy(err)
assert.is_truthy(ok)
end)
@@ -317,7 +317,7 @@ describe(PLUGIN_NAME .. ": (schema)", function()
}

local ok, err = validate(config)

assert.is_falsy(err)
assert.is_truthy(ok)
end)
@@ -344,7 +344,7 @@ describe(PLUGIN_NAME .. ": (schema)", function()
}

local ok, err = validate(config)

assert.is_falsy(err)
assert.is_truthy(ok)
end)
(diff for the remaining 11 files not loaded)
