diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index fd4eb59ce77..97392a004c9 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -70,6 +70,7 @@ export class ChatGPTApi implements LLMApi {
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
       top_p: modelConfig.top_p,
+      max_tokens: Math.max(modelConfig.max_tokens, 1024),
     };
 
     console.log("[Request] openai payload: ", requestPayload);
diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx
index 6e4c9bcb17b..1c730e1449f 100644
--- a/app/components/model-config.tsx
+++ b/app/components/model-config.tsx
@@ -76,8 +76,8 @@ export function ModelConfigList(props: {
         >
           props.updateConfig(
diff --git a/app/store/config.ts b/app/store/config.ts
index 5fcd6ff514c..17eb88c30ed 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -49,7 +49,7 @@ export const DEFAULT_CONFIG = {
     model: "gpt-3.5-turbo" as ModelType,
     temperature: 0.5,
     top_p: 1,
-    max_tokens: 2000,
+    max_tokens: 8192,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
@@ -82,7 +82,7 @@ export const ModalConfigValidator = {
     return x as ModelType;
   },
   max_tokens(x: number) {
-    return limitNumber(x, 0, 100000, 2000);
+    return limitNumber(x, 0, 512000, 1024);
   },
   presence_penalty(x: number) {
     return limitNumber(x, -2, 2, 0);
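
For reference, a minimal sketch of how the new bounds interact. The body of limitNumber below is an assumption for illustration (a clamp into [min, max] with a fallback for non-numeric input), not code copied from the repo's utils:

    // Assumed shape of limitNumber: clamp x into [min, max], fall back to
    // defaultValue when x is not a usable number.
    function limitNumber(
      x: number,
      min: number,
      max: number,
      defaultValue: number,
    ): number {
      if (typeof x !== "number" || isNaN(x)) return defaultValue;
      return Math.min(max, Math.max(min, x));
    }

    // After this patch, ModalConfigValidator.max_tokens clamps into
    // [0, 512000] with a fallback of 1024 (previously [0, 100000] / 2000).
    const validated = limitNumber(Number("oops"), 0, 512000, 1024); // 1024

    // The request builder in openai.ts then floors the configured value at
    // 1024, so even a stored max_tokens below the floor yields a usable
    // payload value.
    const stored = limitNumber(200, 0, 512000, 1024); // 200 (within bounds)
    const payloadMaxTokens = Math.max(stored, 1024);  // 1024 (request-side floor)

The Math.max floor in openai.ts presumably guards requests even when an older persisted config still carries a max_tokens value below 1024, independent of the validator running again.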