diff --git a/dotCMS/src/main/java/com/dotcms/ai/service/OpenAIService.java b/dotCMS/src/main/java/com/dotcms/ai/service/OpenAIService.java
new file mode 100644
index 000000000000..94afd84c5639
--- /dev/null
+++ b/dotCMS/src/main/java/com/dotcms/ai/service/OpenAIService.java
@@ -0,0 +1,17 @@
+package com.dotcms.ai.service;
+
+import com.dotmarketing.util.json.JSONObject;
+
+/**
+ * Service that exposes the OpenAI models available to the configured account.
+ */
+public interface OpenAIService {
+
+    /**
+     * Returns the models supported by the configured OpenAI API key.
+     *
+     * @return The raw response of the OpenAI models endpoint as a {@link JSONObject}.
+     */
+    JSONObject getSupportedModels();
+
+}
diff --git a/dotCMS/src/main/java/com/dotcms/ai/service/OpenAIServiceImpl.java b/dotCMS/src/main/java/com/dotcms/ai/service/OpenAIServiceImpl.java
new file mode 100644
index 000000000000..56a51c0b3ef0
--- /dev/null
+++ b/dotCMS/src/main/java/com/dotcms/ai/service/OpenAIServiceImpl.java
@@ -0,0 +1,29 @@
+package com.dotcms.ai.service;
+
+import com.dotcms.ai.app.AppConfig;
+import com.dotcms.ai.util.OpenAIRequest;
+import com.dotmarketing.util.json.JSONObject;
+
+import javax.ws.rs.HttpMethod;
+
+/**
+ * {@link OpenAIService} implementation that calls the OpenAI REST API directly,
+ * authenticating with the API key stored in the dotAI app configuration.
+ */
+public class OpenAIServiceImpl implements OpenAIService {
+
+    private static final String AI_MODELS_URL = "https://api.openai.com/v1/models";
+
+    private final AppConfig appConfig;
+
+    public OpenAIServiceImpl(final AppConfig appConfig) {
+        this.appConfig = appConfig;
+    }
+
+    @Override
+    public JSONObject getSupportedModels() {
+        final String response = OpenAIRequest.doRequest(AI_MODELS_URL, HttpMethod.GET, appConfig.getApiKey(), null);
+        return new JSONObject(response);
+    }
+
+}
diff --git a/dotCMS/src/main/java/com/dotcms/ai/util/OpenAIModel.java b/dotCMS/src/main/java/com/dotcms/ai/util/OpenAIModel.java
deleted file mode 100644
index 7e368c002ae3..000000000000
--- a/dotCMS/src/main/java/com/dotcms/ai/util/OpenAIModel.java
+++ /dev/null
@@ -1,78 +0,0 @@
-package com.dotcms.ai.util;
-
-import com.dotmarketing.exception.DotRuntimeException;
-
-import java.util.Arrays;
-import java.util.stream.Collectors;
-
-/**
- * Enum representing different models of OpenAI.
- * Each enum value contains the model name, tokens per minute, API per minute, maximum tokens, and a flag indicating if it's a completion model.
- */
-public enum OpenAIModel {
-
-    GPT_3_5_TURBO("gpt-3.5-turbo", 3000, 3500, 4096, true),
-    GPT_3_5_TURBO_16k("gpt-3.5-turbo-16k", 180000, 3500, 16384, true),
-    GPT_4("gpt-4", 10000, 200, 8191, true),
-    GPT_4_TURBO("gpt-4-1106-preview", 10000, 200, 128000, true),
-    GPT_4_TURBO_PREVIEW("gpt-4-turbo-preview", 10000, 200, 128000, true),
-    TEXT_EMBEDDING_ADA_002("text-embedding-ada-002", 1000000, 3000, 8191, false),
-    DALL_E_2("dall-e-2", 0, 50, 0, false),
-    DALL_E_3("dall-e-3", 0, 50, 0, false);
-
-    public final int tokensPerMinute;
-    public final int apiPerMinute;
-    public final int maxTokens;
-    public final String modelName;
-    public final boolean completionModel;
-
-    OpenAIModel(final String modelName,
-                final int tokensPerMinute,
-                final int apiPerMinute,
-                final int maxTokens,
-                final boolean completionModel) {
-        this.modelName = modelName;
-        this.tokensPerMinute = tokensPerMinute;
-        this.apiPerMinute = apiPerMinute;
-        this.maxTokens = maxTokens;
-        this.completionModel = completionModel;
-    }
-
-    /**
-     * Resolves the model based on the input string.
-     *
-     * @param modelIn The input string representing the model.
-     * @return The corresponding OpenAIModel.
-     * @throws DotRuntimeException If the input string does not correspond to any OpenAIModel.
-     */
-    public static OpenAIModel resolveModel(final String modelIn) {
-        final String modelOut = modelIn.replace("-", "_").replace(".", "_").toUpperCase().trim();
-        for (final OpenAIModel openAiModel : OpenAIModel.values()) {
-            if (openAiModel.modelName.equalsIgnoreCase(modelIn) || openAiModel.name().equalsIgnoreCase(modelOut)) {
-                return openAiModel;
-            }
-        }
-
-        throw new DotRuntimeException(
-                "Unable to parse model:'" + modelIn + "'. Only " + supportedModels() + " are supported ");
-    }
-
-    /**
-     * Returns a string representing the supported models.
-     *
-     * @return A string representing the supported models.
-     */
-    private static String supportedModels() {
-        return Arrays.stream(OpenAIModel.values()).map(o -> o.modelName).collect(Collectors.joining(", "));
-    }
-
-    /**
-     * Returns the minimum interval between calls for the model.
-     *
-     * @return The minimum interval between calls for the model.
-     */
-    public long minIntervalBetweenCalls() {
-        return 60000 / apiPerMinute;
-    }
-
-}
diff --git a/dotCMS/src/main/resources/apps/dotAI.yml b/dotCMS/src/main/resources/apps/dotAI.yml
index c48eb25b26e0..0d3b49dc5eef 100644
--- a/dotCMS/src/main/resources/apps/dotAI.yml
+++ b/dotCMS/src/main/resources/apps/dotAI.yml
@@ -61,19 +61,75 @@ params:
         value: "1920x1080"
       - label: "256x256 (Small Square 1:1)"
         value: "256x256"
-  model:
-    value: "gpt-3.5-turbo-16k"
+  modelName:
+    value: ""
     hidden: false
     type: "STRING"
-    label: "Model"
+    label: "Model Name"
     hint: "Model used to generate ChatGPT API response."
     required: true
-  imageModel:
-    value: "dall-e-3"
+  modelTokensPerMinute:
+    value: ""
+    hidden: false
+    type: "STRING"
+    label: "Tokens per Minute"
+    hint: "Tokens per minute allowed for the ChatGPT API model."
+    required: true
+  modelApiPerMinute:
+    value: ""
+    hidden: false
+    type: "STRING"
+    label: "API Calls per Minute"
+    hint: "API calls per minute allowed for the ChatGPT API model."
+    required: true
+  modelMaxTokens:
+    value: ""
+    hidden: false
+    type: "STRING"
+    label: "Max Tokens"
+    hint: "Maximum number of tokens the model accepts per request."
+    required: true
+  modelCompletion:
+    value: ""
+    hidden: false
+    type: "BOOL"
+    label: "Completion model enabled"
+    hint: "Whether the model is a completion model."
+    required: true
+  imageModelName:
+    value: ""
     hidden: false
     type: "STRING"
-    label: "Image Model"
-    hint: "Image Model used to generate AI Images"
+    label: "Image Model Name"
+    hint: "Model used to generate AI images."
+    required: true
+  imageModelTokensPerMinute:
+    value: ""
+    hidden: false
+    type: "STRING"
+    label: "Image Tokens per Minute"
+    hint: "Tokens per minute allowed for the image model."
+    required: true
+  imageModelApiPerMinute:
+    value: ""
+    hidden: false
+    type: "STRING"
+    label: "Image API Calls per Minute"
+    hint: "API calls per minute allowed for the image model."
+    required: true
+  imageModelMaxTokens:
+    value: ""
+    hidden: false
+    type: "STRING"
+    label: "Image Max Tokens"
+    hint: "Maximum number of tokens the image model accepts per request."
+    required: true
+  imageModelCompletion:
+    value: ""
+    hidden: false
+    type: "BOOL"
+    label: "Image Completion model enabled"
+    hint: "Whether the image model is a completion model."
     required: true
   listenerIndexer:
     value: ""
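Reviewer note (not part of the patch): the sketch below shows how the new service introduced above is expected to be exercised. The wrapper class and method names (SupportedModelsSketch, fetchSupportedModels) are illustrative only, and the AppConfig instance is assumed to be resolved from the dotAI app exactly as OpenAIServiceImpl's constructor requires.

    package com.dotcms.ai.service;

    import com.dotcms.ai.app.AppConfig;
    import com.dotmarketing.util.json.JSONObject;

    // Illustrative sketch only; class and method names are hypothetical.
    class SupportedModelsSketch {

        JSONObject fetchSupportedModels(final AppConfig appConfig) {
            // Performs a live GET against https://api.openai.com/v1/models using
            // the API key stored in the dotAI app configuration (appConfig is
            // assumed to be the same object existing callers already resolve).
            final OpenAIService service = new OpenAIServiceImpl(appConfig);
            return service.getSupportedModels();
        }
    }

Upgrade implication worth noting: the limits previously hard-coded in the removed OpenAIModel enum (for example, the old default "gpt-3.5-turbo-16k" carried 180000 tokens per minute, 3500 API calls per minute, 16384 max tokens, completion enabled) must now be entered manually in the new dotAI app fields, since the defaults in dotAI.yml are empty strings.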