diff --git a/README.md b/README.md
index e9164cd..23b576b 100644
--- a/README.md
+++ b/README.md
@@ -23,6 +23,18 @@ openai_api_key: # OpenAI API Key
 openai_model: # OpenAI Model
 # Default Value is "gpt-4o-mini"
 
+openai_temperature: # OpenAI Temperature
+# Default Value is 0.5
+
+openai_top_p: # OpenAI Top P
+# Default not set
+
+openai_frequency_penalty: # OpenAI Frequency Penalty
+# Default Value is 0
+
+openai_presence_penalty: # OpenAI Presence Penalty
+# Default Value is 0
+
 system_prompt: # System Prompt for the assistant
 
 user_input: # User Input for the assistant
diff --git a/action.yml b/action.yml
index cdbecbf..a64de68 100644
--- a/action.yml
+++ b/action.yml
@@ -18,6 +18,21 @@ inputs:
     description: "OpenAI Model"
     required: false
     default: "gpt-4o-mini"
+  openai_temperature:
+    description: "OpenAI Temperature"
+    required: false
+    default: "0.5"
+  openai_top_p:
+    description: "OpenAI Top P"
+    required: false
+  openai_frequency_penalty:
+    description: "OpenAI Frequency Penalty"
+    required: false
+    default: "0"
+  openai_presence_penalty:
+    description: "OpenAI Presence Penalty"
+    required: false
+    default: "0"
   system_prompt:
     description: "System Prompt for the assistant"
     required: true
diff --git a/src/index.ts b/src/index.ts
index eee78ae..da33157 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -7,6 +7,10 @@ import { RunnableToolFunctionWithParse } from "openai/lib/RunnableFunction";
 export let openai_base_url: string;
 export let openai_api_key: string;
 export let openai_model: string;
+export let openai_temperature: number | undefined;
+export let openai_top_p: number | undefined;
+export let openai_frequency_penalty: number | undefined;
+export let openai_presence_penalty: number | undefined;
 export let system_prompt: string;
 export let user_input: string;
 export let github_token: string;
@@ -24,6 +28,18 @@ async function checkInput() {
   if (!openai_model) {
     openai_model = "gpt-4o-mini";
   }
+  if (Number.isNaN(openai_temperature)) {
+    openai_temperature = undefined;
+  }
+  if (Number.isNaN(openai_top_p)) {
+    openai_top_p = undefined;
+  }
+  if (Number.isNaN(openai_frequency_penalty)) {
+    openai_frequency_penalty = undefined;
+  }
+  if (Number.isNaN(openai_presence_penalty)) {
+    openai_presence_penalty = undefined;
+  }
   if (!system_prompt) {
     throw new Error("system_prompt is required");
   }
@@ -49,6 +65,14 @@ async function main() {
   openai_base_url = core.getInput("openai_base_url");
   openai_api_key = core.getInput("openai_api_key");
   openai_model = core.getInput("openai_model");
+  openai_temperature = parseFloat(core.getInput("openai_temperature"));
+  openai_top_p = parseFloat(core.getInput("openai_top_p"));
+  openai_frequency_penalty = parseFloat(
+    core.getInput("openai_frequency_penalty")
+  );
+  openai_presence_penalty = parseFloat(
+    core.getInput("openai_presence_penalty")
+  );
   system_prompt = core.getInput("system_prompt");
   user_input = core.getInput("user_input");
   github_token = core.getInput("github_token");
@@ -61,6 +85,10 @@ async function main() {
   });
   const runner = openai.beta.chat.completions.runTools({
     model: openai_model,
+    temperature: openai_temperature,
+    frequency_penalty: openai_frequency_penalty,
+    top_p: openai_top_p,
+    presence_penalty: openai_presence_penalty,
     messages: [
       {
         role: "system",