diff --git a/DESCRIPTION b/DESCRIPTION index b16d2ba0..4c8cb547 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -28,16 +28,14 @@ Imports: glue, htmltools, htmlwidgets, - httr2, + httr2 (>= 1.0.3.9000), ids, jsonlite, purrr, - R6 (>= 2.0), rlang, rstudioapi (>= 0.12), shiny (>= 1.9.0), shiny.i18n, - SSEparser, stringr (>= 1.5.0), utils, yaml @@ -57,6 +55,8 @@ Suggests: withr Config/testthat/edition: 3 Config/testthat/parallel: true +Remotes: + r-lib/httr2 Encoding: UTF-8 Language: en-US LazyData: true diff --git a/NAMESPACE b/NAMESPACE index 46fd2c12..5a937501 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -34,9 +34,11 @@ S3method(list_available_models,ollama) S3method(list_available_models,openai) S3method(list_available_models,perplexity) export(chat) +export(create_chat_azure_openai) +export(create_chat_google) +export(create_chat_ollama) +export(create_chat_openai) export(create_completion_anthropic) -export(create_completion_azure_openai) -export(create_completion_google) export(create_completion_huggingface) export(get_available_endpoints) export(get_available_models) @@ -52,7 +54,6 @@ export(gptstudio_sitrep) export(gptstudio_skeleton_build) export(gptstudio_spelling_grammar) export(input_audio_clip) -export(openai_create_chat_completion) export(transcribe_audio) import(cli) import(htmltools) @@ -60,11 +61,9 @@ import(htmlwidgets) import(httr2) import(rlang) import(shiny) -importFrom(R6,R6Class) importFrom(glue,glue) importFrom(htmltools,div) importFrom(htmltools,tag) importFrom(htmltools,tagList) importFrom(htmltools,tags) -importFrom(jsonlite,fromJSON) importFrom(shiny,icon) diff --git a/NEWS.md b/NEWS.md index 3960a992..dcc4dd25 100644 --- a/NEWS.md +++ b/NEWS.md @@ -11,7 +11,7 @@ - New styling of chat app. #224 - Add code syntax highlighting to chat app. #224 - Replace curl calls with httr2. #224 -- Remove magrittr pipe in favor of base pipe, require R >= 4.1 +- Replace %>% with |>, bump min R to >=4.1, remove revdep folder. #226 ## gptstudio 0.4.0 diff --git a/R/api_perform_request.R b/R/api_perform_request.R index 1152e2ae..e3aca461 100644 --- a/R/api_perform_request.R +++ b/R/api_perform_request.R @@ -24,7 +24,6 @@ gptstudio_request_perform <- function(skeleton, ...) 
{ #' @export gptstudio_request_perform.gptstudio_request_openai <- function(skeleton, ..., shiny_session = NULL) { - # Translate request skeleton$history <- chat_history_append( history = skeleton$history, @@ -37,46 +36,12 @@ gptstudio_request_perform.gptstudio_request_openai <- function(skeleton, ..., skeleton$history <- add_docs_messages_to_history(skeleton$history) } - body <- list( - "model" = skeleton$model, - "stream" = skeleton$stream, - "messages" = skeleton$history, - "max_tokens" = skeleton$extras$max_tokens, - "n" = skeleton$extra$n - ) - - # Create request - request <- request(skeleton$url) |> - req_auth_bearer_token(skeleton$api_key) |> - req_body_json(body) - - # Perform request - response <- NULL - - if (isTRUE(skeleton$stream)) { - if (is.null(shiny_session)) stop("Stream requires a shiny session object") - - stream_handler <- OpenaiStreamParser$new( - session = shiny_session, - user_prompt = skeleton$prompt - ) - - stream_chat_completion( - messages = skeleton$history, - element_callback = stream_handler$parse_sse, - model = skeleton$model, - openai_api_key = skeleton$api_key - ) - - response <- stream_handler$value - } else { - response_json <- request |> - req_perform() |> - resp_body_json() + response <- create_chat_openai(prompt = skeleton$history, + model = skeleton$model, + stream = skeleton$stream, + shiny_session = shiny_session, + user_prompt = skeleton$prompt) - response <- response_json$choices[[1]]$message$content - } - # return value structure( list( skeleton = skeleton, @@ -104,50 +69,68 @@ gptstudio_request_perform.gptstudio_request_huggingface <- } #' @export -gptstudio_request_perform.gptstudio_request_google <- - function(skeleton, ...) { - response <- create_completion_google(prompt = skeleton$prompt) - structure( - list( - skeleton = skeleton, - response = response - ), - class = "gptstudio_response_google" - ) +gptstudio_request_perform.gptstudio_request_google <- function(skeleton, ...) { + skeleton$history <- chat_history_append( + history = skeleton$history, + role = "user", + name = "user_message", + content = skeleton$prompt + ) + + if (getOption("gptstudio.read_docs")) { + skeleton$history <- add_docs_messages_to_history(skeleton$history) } -#' @export -gptstudio_request_perform.gptstudio_request_anthropic <- - function(skeleton, ...) { - model <- skeleton$model + response <- create_chat_google(prompt = skeleton$history, + model = skeleton$model) - skeleton$history <- chat_history_append( - history = skeleton$history, - role = "user", - content = skeleton$prompt - ) + structure( + list( + skeleton = skeleton, + response = response + ), + class = "gptstudio_response_google" + ) +} - # Anthropic does not have a system message, so convert it to user - system <- - purrr::keep(skeleton$history, function(x) x$role == "system") |> - purrr::pluck("content") - history <- - purrr::keep(skeleton$history, function(x) x$role %in% c("user", "assistant")) +#' @export +gptstudio_request_perform.gptstudio_request_anthropic <- function(skeleton, + shiny_session = NULL, + ...) 
{ + model <- skeleton$model + stream <- skeleton$stream + prompt <- skeleton$prompt - cli_inform(c("i" = "Using Anthropic API with {model} model")) - response <- create_completion_anthropic( - prompt = history, - system = system, - model = model - ) - structure( - list( - skeleton = skeleton, - response = response - ), - class = "gptstudio_response_anthropic" - ) - } + skeleton$history <- chat_history_append( + history = skeleton$history, + role = "user", + content = skeleton$prompt + ) + + # Anthropic does not have a system message, so convert it to user + system <- + purrr::keep(skeleton$history, function(x) x$role == "system") |> + purrr::pluck("content") + history <- + purrr::keep(skeleton$history, function(x) x$role %in% c("user", "assistant")) + + cli_inform(c("i" = "Using Anthropic API with {model} model")) + response <- create_completion_anthropic( + prompt = history, + system = system, + model = model, + stream = stream, + shiny_session = shiny_session, + user_prompt = prompt + ) + structure( + list( + skeleton = skeleton, + response = response + ), + class = "gptstudio_response_anthropic" + ) +} #' @export gptstudio_request_perform.gptstudio_request_azure_openai <- function(skeleton, @@ -161,25 +144,16 @@ gptstudio_request_perform.gptstudio_request_azure_openai <- function(skeleton, content = skeleton$prompt ) - if (isTRUE(skeleton$stream)) { - if (is.null(shiny_session)) stop("Stream requires a shiny session object") - - stream_handler <- OpenaiStreamParser$new( - session = shiny_session, - user_prompt = skeleton$prompt - ) - - stream_azure_openai( - messages = skeleton$history, - element_callback = stream_handler$parse_sse - ) - - response <- stream_handler$value - } else { - response <- query_api_azure_openai(request_body = skeleton$history) - response <- response$choices[[1]]$message$content + if (getOption("gptstudio.read_docs")) { + skeleton$history <- add_docs_messages_to_history(skeleton$history) } + response <- create_chat_azure_openai(prompt = skeleton$history, + model = skeleton$model, + stream = skeleton$stream, + shiny_session = shiny_session, + user_prompt = skeleton$prompt) + structure( list( skeleton = skeleton, @@ -205,9 +179,9 @@ gptstudio_request_perform.gptstudio_request_ollama <- function(skeleton, ..., skeleton$history <- add_docs_messages_to_history(skeleton$history) } - response <- ollama_chat( + response <- create_chat_ollama( model = skeleton$model, - messages = skeleton$history, + prompt = skeleton$history, stream = skeleton$stream, shiny_session = shiny_session, user_prompt = skeleton$prompt @@ -217,7 +191,7 @@ gptstudio_request_perform.gptstudio_request_ollama <- function(skeleton, ..., structure( list( skeleton = skeleton, - response = response$message$content + response = response ), class = "gptstudio_response_ollama" ) diff --git a/R/api_process_response.R b/R/api_process_response.R index 05409b9f..fd7adbb2 100644 --- a/R/api_process_response.R +++ b/R/api_process_response.R @@ -19,8 +19,7 @@ gptstudio_response_process <- function(skeleton, ...) { } #' @export -gptstudio_response_process.gptstudio_response_openai <- - function(skeleton, ...) { +gptstudio_response_process.gptstudio_response_openai <- function(skeleton, ...) { last_response <- skeleton$response skeleton <- skeleton$skeleton @@ -41,8 +40,7 @@ gptstudio_response_process.gptstudio_response_openai <- } #' @export -gptstudio_response_process.gptstudio_response_huggingface <- - function(skeleton, ...) { +gptstudio_response_process.gptstudio_response_huggingface <- function(skeleton, ...) 
{ response <- skeleton$response skeleton <- skeleton$skeleton last_response <- response[[1]]$generated_text @@ -65,8 +63,7 @@ gptstudio_response_process.gptstudio_response_huggingface <- } #' @export -gptstudio_response_process.gptstudio_response_anthropic <- - function(skeleton, ...) { +gptstudio_response_process.gptstudio_response_anthropic <- function(skeleton, ...) { last_response <- skeleton$response skeleton <- skeleton$skeleton @@ -87,31 +84,27 @@ gptstudio_response_process.gptstudio_response_anthropic <- } #' @export -gptstudio_response_process.gptstudio_response_google <- - function(skeleton, ...) { - response <- skeleton$response - skeleton <- skeleton$skeleton +gptstudio_response_process.gptstudio_response_google <- function(skeleton, ...) { + last_response <- skeleton$response + skeleton <- skeleton$skeleton - new_history <- c( - skeleton$history, - list( - list(role = "user", content = skeleton$prompt), - list(role = "assistant", content = response) - ) - ) + new_history <- chat_history_append( + history = skeleton$history, + role = "assistant", + content = last_response + ) - skeleton$history <- new_history - skeleton$prompt <- NULL # remove the last prompt - class(skeleton) <- c( - "gptstudio_request_skeleton", - "gptstudio_request_google" - ) - skeleton - } + skeleton$history <- new_history + skeleton$prompt <- NULL # remove the last prompt + class(skeleton) <- c( + "gptstudio_request_skeleton", + "gptstudio_request_google" + ) + skeleton +} #' @export -gptstudio_response_process.gptstudio_response_azure_openai <- - function(skeleton, ...) { +gptstudio_response_process.gptstudio_response_azure_openai <- function(skeleton, ...) { last_response <- skeleton$response skeleton <- skeleton$skeleton @@ -153,8 +146,7 @@ gptstudio_response_process.gptstudio_response_ollama <- function(skeleton, ...) } #' @export -gptstudio_response_process.gptstudio_response_perplexity <- - function(skeleton, ...) { +gptstudio_response_process.gptstudio_response_perplexity <- function(skeleton, ...) 
{ response <- skeleton$response skeleton <- skeleton$skeleton diff --git a/R/api_skeletons.R b/R/api_skeletons.R index 791505d4..0205a825 100644 --- a/R/api_skeletons.R +++ b/R/api_skeletons.R @@ -273,7 +273,7 @@ gptstudio_create_skeleton <- function(service = "openai", prompt = prompt, history = history, # forcing false until streaming implemented for anthropic - stream = FALSE + stream = stream ), "google" = new_gptstudio_request_skeleton_google( model = model, diff --git a/R/gptstudio-package.R b/R/gptstudio-package.R index 53cc8f07..ec824ce6 100644 --- a/R/gptstudio-package.R +++ b/R/gptstudio-package.R @@ -8,7 +8,3 @@ #' @importFrom glue glue ## gptstudio namespace: end NULL - -dummy <- function() { - SSEparser::SSEparser -} diff --git a/R/gptstudio-sitrep.R b/R/gptstudio-sitrep.R index d139dc7f..3ffdc435 100644 --- a/R/gptstudio-sitrep.R +++ b/R/gptstudio-sitrep.R @@ -13,7 +13,7 @@ check_api_connection_openai <- function(service, api_key) { } response <- - request_base(task = "models") |> + request_base_openai(task = "models") |> req_error(is_error = function(resp) FALSE) |> req_perform() process_response(response, service) @@ -64,7 +64,7 @@ check_api_connection_google <- function(service, api_key) { request_body <- list(contents = list(list(parts = list(list(text = "Hello there"))))) - response <- request_base_google(model = "gemini-pro", key = api_key) |> + response <- request_base_google(model = "gemini-pro", api_key = api_key) |> req_body_json(data = request_body) |> req_error(is_error = function(resp) FALSE) |> req_perform() @@ -74,7 +74,6 @@ check_api_connection_google <- function(service, api_key) { #' @inheritParams check_api_connection_openai check_api_connection_azure_openai <- function(service, api_key) { - "" api_check <- check_api_key(service, api_key) if (rlang::is_false(api_check)) { return(invisible(NULL)) diff --git a/R/import-standalone-purrr.R b/R/import-standalone-purrr.R new file mode 100644 index 00000000..623142a0 --- /dev/null +++ b/R/import-standalone-purrr.R @@ -0,0 +1,240 @@ +# Standalone file: do not edit by hand +# Source: +# ---------------------------------------------------------------------- +# +# --- +# repo: r-lib/rlang +# file: standalone-purrr.R +# last-updated: 2023-02-23 +# license: https://unlicense.org +# imports: rlang +# --- +# +# This file provides a minimal shim to provide a purrr-like API on top of +# base R functions. They are not drop-in replacements but allow a similar style +# of programming. +# +# ## Changelog +# +# 2023-02-23: +# * Added `list_c()` +# +# 2022-06-07: +# * `transpose()` is now more consistent with purrr when inner names +# are not congruent (#1346). +# +# 2021-12-15: +# * `transpose()` now supports empty lists. +# +# 2021-05-21: +# * Fixed "object `x` not found" error in `imap()` (@mgirlich) +# +# 2020-04-14: +# * Removed `pluck*()` functions +# * Removed `*_cpl()` functions +# * Used `as_function()` to allow use of `~` +# * Used `.` prefix for helpers +# +# nocov start + +map <- function(.x, .f, ...) { + .f <- as_function(.f, env = global_env()) + lapply(.x, .f, ...) +} +walk <- function(.x, .f, ...) { + map(.x, .f, ...) + invisible(.x) +} + +map_lgl <- function(.x, .f, ...) { + .rlang_purrr_map_mold(.x, .f, logical(1), ...) +} +map_int <- function(.x, .f, ...) { + .rlang_purrr_map_mold(.x, .f, integer(1), ...) +} +map_dbl <- function(.x, .f, ...) { + .rlang_purrr_map_mold(.x, .f, double(1), ...) +} +map_chr <- function(.x, .f, ...) { + .rlang_purrr_map_mold(.x, .f, character(1), ...) 
+} +.rlang_purrr_map_mold <- function(.x, .f, .mold, ...) { + .f <- as_function(.f, env = global_env()) + out <- vapply(.x, .f, .mold, ..., USE.NAMES = FALSE) + names(out) <- names(.x) + out +} + +map2 <- function(.x, .y, .f, ...) { + .f <- as_function(.f, env = global_env()) + out <- mapply(.f, .x, .y, MoreArgs = list(...), SIMPLIFY = FALSE) + if (length(out) == length(.x)) { + set_names(out, names(.x)) + } else { + set_names(out, NULL) + } +} +map2_lgl <- function(.x, .y, .f, ...) { + as.vector(map2(.x, .y, .f, ...), "logical") +} +map2_int <- function(.x, .y, .f, ...) { + as.vector(map2(.x, .y, .f, ...), "integer") +} +map2_dbl <- function(.x, .y, .f, ...) { + as.vector(map2(.x, .y, .f, ...), "double") +} +map2_chr <- function(.x, .y, .f, ...) { + as.vector(map2(.x, .y, .f, ...), "character") +} +imap <- function(.x, .f, ...) { + map2(.x, names(.x) %||% seq_along(.x), .f, ...) +} + +pmap <- function(.l, .f, ...) { + .f <- as.function(.f) + args <- .rlang_purrr_args_recycle(.l) + do.call("mapply", c( + FUN = list(quote(.f)), + args, MoreArgs = quote(list(...)), + SIMPLIFY = FALSE, USE.NAMES = FALSE + )) +} +.rlang_purrr_args_recycle <- function(args) { + lengths <- map_int(args, length) + n <- max(lengths) + + stopifnot(all(lengths == 1L | lengths == n)) + to_recycle <- lengths == 1L + args[to_recycle] <- map(args[to_recycle], function(x) rep.int(x, n)) + + args +} + +keep <- function(.x, .f, ...) { + .x[.rlang_purrr_probe(.x, .f, ...)] +} +discard <- function(.x, .p, ...) { + sel <- .rlang_purrr_probe(.x, .p, ...) + .x[is.na(sel) | !sel] +} +map_if <- function(.x, .p, .f, ...) { + matches <- .rlang_purrr_probe(.x, .p) + .x[matches] <- map(.x[matches], .f, ...) + .x +} +.rlang_purrr_probe <- function(.x, .p, ...) { + if (is_logical(.p)) { + stopifnot(length(.p) == length(.x)) + .p + } else { + .p <- as_function(.p, env = global_env()) + map_lgl(.x, .p, ...) + } +} + +compact <- function(.x) { + Filter(length, .x) +} + +transpose <- function(.l) { + if (!length(.l)) { + return(.l) + } + + inner_names <- names(.l[[1]]) + + if (is.null(inner_names)) { + fields <- seq_along(.l[[1]]) + } else { + fields <- set_names(inner_names) + .l <- map(.l, function(x) { + if (is.null(names(x))) { + set_names(x, inner_names) + } else { + x + } + }) + } + + # This way missing fields are subsetted as `NULL` instead of causing + # an error + .l <- map(.l, as.list) + + map(fields, function(i) { + map(.l, .subset2, i) + }) +} + +every <- function(.x, .p, ...) { + .p <- as_function(.p, env = global_env()) + + for (i in seq_along(.x)) { + if (!rlang::is_true(.p(.x[[i]], ...))) return(FALSE) + } + TRUE +} +some <- function(.x, .p, ...) { + .p <- as_function(.p, env = global_env()) + + for (i in seq_along(.x)) { + if (rlang::is_true(.p(.x[[i]], ...))) return(TRUE) + } + FALSE +} +negate <- function(.p) { + .p <- as_function(.p, env = global_env()) + function(...) !.p(...) +} + +reduce <- function(.x, .f, ..., .init) { + f <- function(x, y) .f(x, y, ...) + Reduce(f, .x, init = .init) +} +reduce_right <- function(.x, .f, ..., .init) { + f <- function(x, y) .f(y, x, ...) + Reduce(f, .x, init = .init, right = TRUE) +} +accumulate <- function(.x, .f, ..., .init) { + f <- function(x, y) .f(x, y, ...) + Reduce(f, .x, init = .init, accumulate = TRUE) +} +accumulate_right <- function(.x, .f, ..., .init) { + f <- function(x, y) .f(y, x, ...) 
+ Reduce(f, .x, init = .init, right = TRUE, accumulate = TRUE) +} + +detect <- function(.x, .f, ..., .right = FALSE, .p = is_true) { + .p <- as_function(.p, env = global_env()) + .f <- as_function(.f, env = global_env()) + + for (i in .rlang_purrr_index(.x, .right)) { + if (.p(.f(.x[[i]], ...))) { + return(.x[[i]]) + } + } + NULL +} +detect_index <- function(.x, .f, ..., .right = FALSE, .p = is_true) { + .p <- as_function(.p, env = global_env()) + .f <- as_function(.f, env = global_env()) + + for (i in .rlang_purrr_index(.x, .right)) { + if (.p(.f(.x[[i]], ...))) { + return(i) + } + } + 0L +} +.rlang_purrr_index <- function(x, right = FALSE) { + idx <- seq_along(x) + if (right) { + idx <- rev(idx) + } + idx +} + +list_c <- function(x) { + inject(c(!!!x)) +} + +# nocov end diff --git a/R/models.R b/R/models.R index 915c3ac4..8252420b 100644 --- a/R/models.R +++ b/R/models.R @@ -29,7 +29,7 @@ new_gptstudio_service <- function(service_name = character()) { #' @export list_available_models.openai <- function(service) { models <- - request_base("models") |> + request_base_openai("models") |> httr2::req_perform() |> httr2::resp_body_json() |> purrr::pluck("data") |> diff --git a/R/service-anthropic.R b/R/service-anthropic.R index eb9a71f0..d0b868cc 100644 --- a/R/service-anthropic.R +++ b/R/service-anthropic.R @@ -1,52 +1,3 @@ -#' Base for a request to the Anthropic API -#' -#' This function sends a request to the Anthropic API endpoint and -#' authenticates with an API key. -#' -#' @param key String containing an Anthropic API key. Defaults to the -#' ANTHROPIC_API_KEY environmental variable if not specified. -#' @return An httr2 request object -request_base_anthropic <- function(key = Sys.getenv("ANTHROPIC_API_KEY")) { - request("https://api.anthropic.com/v1/messages") |> - req_headers( - "anthropic-version" = "2023-06-01", - "content-type" = "application/json", - "x-api-key" = key - ) -} - -#' A function that sends a request to the Anthropic API and returns the -#' response. -#' -#' @param request_body A list that contains the parameters for the task. -#' @param key String containing an Anthropic API key. Defaults -#' to the ANTHROPIC_API_KEY environmental variable if not specified. -#' -#' @return The response from the API. -#' -query_api_anthropic <- function(request_body, - key = Sys.getenv("ANTHROPIC_API_KEY")) { - response <- request_base_anthropic(key) |> - req_body_json(data = request_body) |> - req_retry(max_tries = 3) |> - req_error(is_error = function(resp) FALSE) |> - req_perform() - - # error handling - if (resp_is_error(response)) { - status <- resp_status(response) # nolint - description <- resp_status_desc(response) # nolint - - cli::cli_abort(message = c( - "x" = "Anthropic API request failed. Error {status} - {description}", - "i" = "Visit the Anthropic API documentation for more details" - )) - } - - response |> - resp_body_json(simplifyVector = TRUE) -} - #' Generate text completions using Anthropic's API #' #' @param prompt The prompt for generating completions @@ -56,29 +7,102 @@ query_api_anthropic <- function(request_body, #' @param max_tokens The maximum number of tokens to generate. Defaults to 256. #' @param key The API key for accessing Anthropic's API. By default, the #' function will try to use the `ANTHROPIC_API_KEY` environment variable. +#' @param stream Whether to stream the response, defaults to FALSE. 
+#' @param shiny_session A Shiny session object to send messages to the client +#' @param user_prompt A user prompt to send to the client #' #' @return A list with the generated completions and other information returned #' by the API. #' @examples #' \dontrun{ #' create_completion_anthropic( -#' prompt = "\n\nHuman: Hello, world!\n\nAssistant:", +#' prompt = list(list(role = "user", content = "Hello")), #' model = "claude-3-haiku-20240307", #' max_tokens = 1028 #' ) #' } #' @export create_completion_anthropic <- function(prompt = list(list(role = "user", content = "Hello")), - system = NULL, model = "claude-3-5-sonnet-20240620", max_tokens = 1028, - key = Sys.getenv("ANTHROPIC_API_KEY")) { + key = Sys.getenv("ANTHROPIC_API_KEY"), + stream = FALSE, + system = NULL, + shiny_session = NULL, + user_prompt = NULL) { request_body <- list( messages = prompt, model = model, max_tokens = max_tokens, - system = system + system = system, + stream = stream ) |> purrr::compact() - answer <- query_api_anthropic(request_body = request_body, key = key) - answer |> purrr::pluck("content", "text") + + query_api_anthropic( + request_body = request_body, + key = key, + stream = stream, + shiny_session = shiny_session, + user_prompt = user_prompt + ) +} + +request_base_anthropic <- function(key = Sys.getenv("ANTHROPIC_API_KEY")) { + request("https://api.anthropic.com/v1/messages") |> + req_headers( + "anthropic-version" = "2023-06-01", + "content-type" = "application/json", + "x-api-key" = key + ) +} + +query_api_anthropic <- function(request_body, + key = Sys.getenv("ANTHROPIC_API_KEY"), + stream = FALSE, + shiny_session = NULL, + user_prompt = NULL) { + req <- request_base_anthropic(key) |> + req_body_json(data = request_body) |> + req_retry(max_tries = 3) |> + req_error(is_error = function(resp) FALSE) + + if (is_true(stream)) { + resp <- req_perform_connection(req, mode = "text") + on.exit(close(resp)) + results <- list() + repeat({ + event <- resp_stream_sse(resp) + if (is.null(event) || event$data == "[DONE]") { + break + } + json <- jsonlite::parse_json(event$data) + results <- merge_dicts(results, json) + if (!is.null(shiny_session)) { + # any communication with JS should be handled here!! + shiny_session$sendCustomMessage( + type = "render-stream", + message = list( + user = user_prompt, + assistant = shiny::markdown(results$delta$text) + ) + ) + } else { + cat(json$delta$text) + } + }) + invisible(results$delta$text) + } else { + response <- req |> req_perform() + if (resp_is_error(response)) { + status <- resp_status(response) + description <- resp_status_desc(response) + cli::cli_abort(c( + "x" = "Anthropic API request failed. Error {status} - {description}", + "i" = "Visit the Anthropic API documentation for more details" + )) + } + response |> + resp_body_json(simplifyVector = TRUE) |> + purrr::pluck("content", "text") + } } diff --git a/R/service-azure_openai.R b/R/service-azure_openai.R index a24dce46..88a88094 100644 --- a/R/service-azure_openai.R +++ b/R/service-azure_openai.R @@ -1,47 +1,56 @@ #' Generate text using Azure OpenAI's API #' -#' @description Use this function to generate text completions using OpenAI's -#' API. +#' @description Use this function to generate text completions using Azure OpenAI's API. #' -#' @param prompt a list to use as the prompt for generating -#' completions -#' @param task a character string for the API task (e.g. "completions"). -#' Defaults to the Azure OpenAI -#' task from environment variables if not specified. 
-#' @param base_url a character string for the base url. It defaults to the Azure -#' OpenAI endpoint from environment variables if not specified. -#' @param deployment_name a character string for the deployment name. It will -#' default to the Azure OpenAI deployment name from environment variables if -#' not specified. -#' @param api_key a character string for the API key. It will default to the Azure +#' @param prompt A list of messages to use as the prompt for generating completions. +#' Each message should be a list with 'role' and 'content' elements. +#' @param model A character string for the model to use. Defaults to the Azure OpenAI +#' deployment name from environment variables if not specified. +#' @param api_key A character string for the API key. It will default to the Azure #' OpenAI API key from your environment variables if not specified. -#' @param api_version a character string for the API version. It will default to -#' the Azure OpenAI API version from your environment variables if not -#' specified. -#' @return a list with the generated completions and other information returned +#' @param task A character string for the API task. Defaults to "chat/completions". +#' @param stream Whether to stream the response, defaults to FALSE. +#' @param shiny_session A Shiny session object to send messages to the client +#' @param user_prompt A user prompt to send to the client +#' @param base_url A character string for the base url. It defaults to the Azure +#' OpenAI endpoint from environment variables if not specified. +#' @param api_version A character string for the API version. It defaults to +#' the Azure OpenAI API version from your environment variables if not specified. +#' +#' @return A list with the generated completions and other information returned #' by the API #' #' @export -create_completion_azure_openai <- - function(prompt, - task = Sys.getenv("AZURE_OPENAI_TASK"), - base_url = Sys.getenv("AZURE_OPENAI_ENDPOINT"), - deployment_name = Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), - api_key = Sys.getenv("AZURE_OPENAI_API_KEY"), - api_version = Sys.getenv("AZURE_OPENAI_API_VERSION")) { - request_body <- list(list(role = "user", content = prompt)) - query_api_azure_openai( - task, - request_body, - base_url, - deployment_name, - api_key, - api_version - ) - } +create_chat_azure_openai <- function(prompt = list(list(role = "user", content = "Hello")), + model = Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), + api_key = Sys.getenv("AZURE_OPENAI_API_KEY"), + task = "chat/completions", + stream = FALSE, + shiny_session = NULL, + user_prompt = NULL, + base_url = Sys.getenv("AZURE_OPENAI_ENDPOINT"), + api_version = Sys.getenv("AZURE_OPENAI_API_VERSION")) { + request_body <- list( + messages = prompt, + model = model, + stream = stream + ) |> purrr::compact() + + query_api_azure_openai( + task = task, + request_body = request_body, + base_url = base_url, + deployment_name = model, + api_key = api_key, + api_version = api_version, + stream = stream, + shiny_session = shiny_session, + user_prompt = user_prompt + ) +} request_base_azure_openai <- - function(task = Sys.getenv("AZURE_OPENAI_TASK"), + function(task, base_url = Sys.getenv("AZURE_OPENAI_ENDPOINT"), deployment_name = Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), api_key = Sys.getenv("AZURE_OPENAI_API_KEY"), @@ -72,39 +81,63 @@ request_base_azure_openai <- } query_api_azure_openai <- - function(task = Sys.getenv("AZURE_OPENAI_TASK"), + function(task, request_body, base_url = Sys.getenv("AZURE_OPENAI_ENDPOINT"), 
deployment_name = Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), api_key = Sys.getenv("AZURE_OPENAI_API_KEY"), - api_version = Sys.getenv("AZURE_OPENAI_API_VERSION")) { - response <- - request_base_azure_openai( - task, - base_url, - deployment_name, - api_key, - api_version - ) |> - req_body_json(list(messages = request_body)) |> + api_version = Sys.getenv("AZURE_OPENAI_API_VERSION"), + stream = FALSE, + shiny_session = NULL, + user_prompt = NULL) { + req <- request_base_azure_openai( + task, + base_url, + deployment_name, + api_key, + api_version + ) |> + req_body_json(data = request_body) |> req_retry(max_tries = 3) |> - req_error(is_error = function(resp) FALSE) |> - req_perform() + req_error(is_error = function(resp) FALSE) - # error handling - if (resp_is_error(response)) { - # nolint start - status <- resp_status(response) - description <- resp_status_desc(response) - cli_abort(message = c( - "x" = "Azure OpenAI API request failed. Error {status} - {description}", - "i" = "Visit the {.href [Azure OpenAi Error code guidance](https://help.openai.com/en/articles/6891839-api-error-code-guidance)} for more details", - "i" = "You can also visit the {.href [API documentation](https://platform.openai.com/docs/guides/error-codes/api-errors)}" - )) - # nolint end + if (is_true(stream)) { + resp <- req |> req_perform_connection(mode = "text") + on.exit(close(resp)) + results <- list() + repeat({ + event <- resp_stream_sse(resp) + if (is.null(event) || event$data == "[DONE]") { + break + } + json <- jsonlite::parse_json(event$data) + results <- merge_dicts(results, json) + if (!is.null(shiny_session)) { + shiny_session$sendCustomMessage( + type = "render-stream", + message = list( + user = user_prompt, + assistant = shiny::markdown(results$choices[[1]]$delta$content) + ) + ) + } else { + cat(json$choices[[1]]$delta$content) + } + }) + invisible(results$choices[[1]]$delta$content) + } else { + resp <- req |> req_perform() + if (resp_is_error(resp)) { + status <- resp_status(resp) + description <- resp_status_desc(resp) + cli::cli_abort(c( + "x" = "Azure OpenAI API request failed. Error {status} - {description}", + "i" = "Visit the {.href [Azure OpenAI Error code guidance](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#error-codes)} for more details" # nolint + )) + } + results <- resp |> resp_body_json() + results$choices[[1]]$message$content } - response |> - resp_body_json() } retrieve_azure_token <- function() { @@ -133,28 +166,3 @@ retrieve_azure_token <- function() { invisible(token$token$credentials$access_token) } - - -stream_azure_openai <- function(messages = list(list(role = "user", content = "hi there")), - element_callback = cat) { - body <- list( - messages = messages, - stream = TRUE - ) - - response <- - request_base_azure_openai() |> - req_body_json(data = body) |> - req_retry(max_tries = 3) |> - req_error(is_error = function(resp) FALSE) |> - req_perform_stream( - callback = \(x) { - element <- rawToChar(x) - element_callback(element) - TRUE - }, - round = "line" - ) - - invisible(response) -} diff --git a/R/service-google.R b/R/service-google.R index 0e470354..d2331219 100644 --- a/R/service-google.R +++ b/R/service-google.R @@ -1,106 +1,75 @@ -#' Base for a request to the Google AI Studio API -#' -#' This function sends a request to a specific Google AI Studio API endpoint and -#' authenticates with an API key. -#' -#' @param model character string specifying a Google AI Studio API model -#' @param key String containing a Google AI Studio API key. 
Defaults to the
-#' GOOGLE_API_KEY environmental variable if not specified.
-#' @return An httr2 request object
-request_base_google <- function(model, key = Sys.getenv("GOOGLE_API_KEY")) {
-  url <- glue::glue(
-    "https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
-  )
-
-  request(url) |>
-    req_url_query(key = key)
-}
-
-
-#' A function that sends a request to the Google AI Studio API and returns the
-#' response.
-#'
-#' @param model A character string that specifies the model to send to the API.
-#' @param request_body A list that contains the parameters for the task.
-#' @param key String containing a Google AI Studio API key. Defaults
-#' to the GOOGLE_API_KEY environmental variable if not specified.
-#'
-#' @return The response from the API.
-#'
-query_api_google <- function(model,
-                             request_body,
-                             key = Sys.getenv("GOOGLE_API_KEY")) {
-  response <- request_base_google(model, key) |>
-    req_body_json(data = request_body) |>
-    req_retry(max_tries = 3) |>
-    req_error(is_error = function(resp) FALSE) |>
-    req_perform()
-
-  # error handling
-  if (resp_is_error(response)) {
-    status <- resp_status(response) # nolint
-    description <- resp_status_desc(response) # nolint
-
-    cli::cli_abort(message = c(
-      "x" = "Google AI Studio API request failed. Error {status} - {description}",
-      "i" = "Visit the Google AI Studio API documentation for more details"
-    ))
-  }
-
-  response |>
-    resp_body_json()
-}
-
 #' Generate text completions using Google AI Studio's API
 #'
 #' @param prompt The prompt for generating completions
-#' @param model The model to use for generating text. By default, the
-#' function will try to use "text-bison-001"
-#' @param key The API key for accessing Google AI Studio's API. By default, the
-#' function will try to use the `GOOGLE_API_KEY` environment variable.
+#' @param model The model to use for generating text. By default, the function
+#' will try to use "gemini-pro"
+#' @param api_key The API key for accessing Google AI Studio's API. By default,
+#' the function will try to use the `GOOGLE_API_KEY` environment variable.
 #'
-#' @return A list with the generated completions and other information returned
-#' by the API.
+#' @return The generated completion as a character string.
 #' @examples
 #' \dontrun{
-#' create_completion_google(
-#'   prompt = "Write a story about a magic backpack",
-#'   temperature = 1.0,
-#'   candidate_count = 3
+#' create_chat_google(
+#'   prompt = list(
+#'     list(role = "user", content = "Write a story about a magic backpack")
+#'   )
 #' )
 #' }
 #' @export
-create_completion_google <- function(prompt,
-                                     model = "gemini-pro",
-                                     key = Sys.getenv("GOOGLE_API_KEY")) {
-  # Constructing the request body as per the API documentation
+create_chat_google <- function(prompt = list(list(role = "user", content = "tell me a joke")),
+                               model = "gemini-pro",
+                               api_key = Sys.getenv("GOOGLE_API_KEY")) {
+
+  messages <- openai_to_google_format(prompt)
+
   request_body <- list(
-    contents = list(
-      list(
-        parts = list(
-          list(
-            text = prompt
-          )
-        )
-      )
-    )
+    # system_instruction = messages$system_instruction,
+    contents = messages$contents
   )
-  response <- query_api_google(model = model, request_body = request_body, key = key)
+  query_api_google(model = model,
+                   request_body = request_body,
+                   api_key = api_key)
+}
+
+request_base_google <- function(model,
+                                api_key = Sys.getenv("GOOGLE_API_KEY")) {
+  request("https://generativelanguage.googleapis.com/v1beta/models") |>
+    req_url_path_append(glue("{model}:generateContent")) |>
+    req_url_query(key = api_key)
+}
+
+query_api_google <- function(request_body,
+                             api_key = Sys.getenv("GOOGLE_API_KEY"),
+                             model) {
+  resp <-
+    request_base_google(model = model, api_key = api_key) |>
+    req_body_json(data = request_body, auto_unbox = TRUE) |>
+    req_retry(max_tries = 3) |>
+    req_error(is_error = function(resp) FALSE) |>
+    req_perform()
+
+  if (resp_is_error(resp)) {
+    status <- resp_status(resp) # nolint
+    description <- resp_status_desc(resp) # nolint
-  # Assuming the response structure follows the API documentation example, parsing it accordingly.
-  # Please adjust if the actual API response has a different structure.
-  purrr::map_chr(response$candidates, ~ .x$content$parts[[1]]$text)
+    cli::cli_abort(c(
+      "x" = "Google AI Studio API request failed. Error {status} - {description}",
+      "i" = "Visit the Google AI Studio API documentation for more details"
+    ))
+  }
+  results <- resp |> resp_body_json()
+  results$candidates[[1]]$content$parts[[1]]$text
 }

-get_available_models_google <- function(key = Sys.getenv("GOOGLE_API_KEY")) {
+get_available_models_google <- function(api_key = Sys.getenv("GOOGLE_API_KEY")) {
   response <- request("https://generativelanguage.googleapis.com/v1beta") |>
     req_url_path_append("models") |>
-    req_url_query(key = key) |>
+    req_url_query(key = api_key) |>
     req_perform()

-  # error handling
   if (resp_is_error(response)) {
     status <- resp_status(response) # nolint
     description <- resp_status_desc(response) # nolint
@@ -118,3 +87,24 @@ get_available_models_google <- function(key = Sys.getenv("GOOGLE_API_KEY")) {
   models$name |>
     stringr::str_remove("models/")
 }
+
+openai_to_google_format <- function(openai_messages) {
+  google_format <- list(contents = list())
+
+  for (message in openai_messages) {
+    role <- message$role
+    content <- message$content
+
+    if (role == "system") {
+      google_format$system_instruction <- list(parts = list(text = content))
+    } else if (role %in% c("user", "assistant")) {
+      google_role <- ifelse(role == "user", "user", "model")
+      google_format$contents <- c(google_format$contents,
+                                  list(list(
+                                    role = google_role,
+                                    parts = list(list(text = content))
+                                  )))
+    }
+  }
+  google_format
+}
diff --git a/R/service-ollama.R b/R/service-ollama.R
index 0a31a5cd..885e7bbf 100644
--- a/R/service-ollama.R
+++ b/R/service-ollama.R
@@ -1,142 +1,123 @@
-ollama_api_url <- function() {
-  Sys.getenv("OLLAMA_HOST", "http://localhost:11434")
+#' Generate text using Ollama's API
+#'
+#' @description Use this function to generate text completions using Ollama's API.
+#'
+#' @param prompt A list of messages to use as the prompt for generating completions.
+#' Each message should be a list with 'role' and 'content' elements.
+#' @param model A character string for the model to use.
+#' @param api_url A character string for the API url. It defaults to the Ollama
+#' host from environment variables or "http://localhost:11434" if not specified.
+#' @param stream Whether to stream the response, defaults to FALSE.
+#' @param shiny_session A Shiny session object to send messages to the client
+#' @param user_prompt A user prompt to send to the client
+#'
+#' @return The generated completion as a character string. When streaming, the
+#' accumulated completion is returned invisibly.
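+#'
+#' @examples
+#' \dontrun{
+#' # A minimal sketch: this assumes a local Ollama server is running at the
+#' # default host and that the requested model has already been pulled.
+#' create_chat_ollama(
+#'   prompt = list(list(role = "user", content = "Say hello in one sentence")),
+#'   model = "llama3.1:latest"
+#' )
+#' }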
+#' +#' @export +create_chat_ollama <- function(prompt = list(list(role = "user", content = "Hello")), + model = "llama3.1:latest", + api_url = Sys.getenv("OLLAMA_HOST", "http://localhost:11434"), + stream = FALSE, + shiny_session = NULL, + user_prompt = NULL) { + request_body <- list( + model = model, + messages = prompt, + stream = stream + ) |> purrr::compact() + + query_api_ollama( + request_body = request_body, + api_url = api_url, + stream = stream, + shiny_session = shiny_session, + user_prompt = user_prompt + ) } -ollama_set_task <- function(task) { - ollama_api_url() |> - request() |> +request_base_ollama <- function(api_url = Sys.getenv("OLLAMA_HOST", "http://localhost:11434")) { + request(api_url) |> req_url_path_append("api") |> - req_url_path_append(task) + req_url_path_append("chat") +} + +query_api_ollama <- function(request_body, + api_url = Sys.getenv("OLLAMA_HOST", "http://localhost:11434"), + stream = FALSE, + shiny_session = NULL, + user_prompt = NULL) { + req <- request_base_ollama(api_url) |> + req_body_json(data = request_body) |> + req_retry(max_tries = 3) |> + req_error(is_error = function(resp) FALSE) + + if (is_true(stream)) { + resp <- req |> req_perform_connection(mode = "text") + on.exit(close(resp)) + results <- list() + repeat({ + event <- resp_stream_lines(resp) + json <- jsonlite::parse_json(event) + if (is_true(json$done)) { + break + } + results <- merge_dicts(results, json) + if (!is.null(shiny_session)) { + # any communication with JS should be handled here!! + shiny_session$sendCustomMessage( + type = "render-stream", + message = list( + user = user_prompt, + assistant = shiny::markdown(results$message$content) + ) + ) + } else { + cat(json$message$content) + } + }) + invisible(results$message$content) + } else { + resp <- req |> req_perform() + if (resp_is_error(resp)) { + status <- resp_status(resp) + description <- resp_status_desc(resp) + cli::cli_abort(c( + "x" = "Ollama API request failed. Error {status} - {description}", + "i" = "Check your Ollama setup and try again." + )) + } + results <- resp |> resp_body_json() + results$message$content + } } -ollama_list <- function() { - ollama_set_task("tags") |> +# Helper functions +ollama_list <- function(api_url = Sys.getenv("OLLAMA_HOST", "http://localhost:11434")) { + request(api_url) |> + req_url_path_append("api") |> + req_url_path_append("tags") |> req_perform() |> resp_body_json() } -ollama_is_available <- function(verbose = FALSE) { - request <- ollama_api_url() |> - request() - +ollama_is_available <- function(api_url = Sys.getenv("OLLAMA_HOST", "http://localhost:11434"), verbose = FALSE) { check_value <- logical(1) - rlang::try_fetch( { - response <- req_perform(request) |> + response <- request(api_url) |> + req_perform() |> resp_body_string() - if (verbose) cli::cli_alert_success(response) check_value <- TRUE }, error = function(cnd) { if (inherits(cnd, "httr2_failure")) { - if (verbose) cli::cli_alert_danger("Couldn't connect to Ollama in {.url {ollama_api_url()}}. Is it running there?") # nolint + if (verbose) cli::cli_alert_danger("Couldn't connect to Ollama in {.url {api_url}}. 
Is it running there?")
       } else {
         if (verbose) cli::cli_alert_danger(cnd)
       }
-      check_value <- FALSE # nolint
+      check_value <- FALSE
     }
   )
-
   invisible(check_value)
 }
-
-body_to_json_str <- function(x) {
-  to_json_params <- rlang::list2(x = x$data, !!!x$params)
-  do.call(jsonlite::toJSON, to_json_params)
-}
-
-
-ollama_perform_stream <- function(request, parser) {
-  req_perform_stream(
-    request,
-    callback = function(x) {
-      parser$parse_ndjson(rawToChar(x))
-      TRUE
-    },
-    buffer_kb = 0.01,
-    round = "line"
-  )
-}
-
-ollama_chat <- function(model, messages, stream = TRUE, shiny_session = NULL, user_prompt = NULL) {
-  body <- list(
-    model = model,
-    messages = messages,
-    stream = stream
-  )
-
-  request <- ollama_set_task("chat") |>
-    req_body_json(data = body)
-
-
-  if (stream) {
-    parser <- OllamaStreamParser$new(
-      session = shiny_session,
-      user_prompt = user_prompt
-    )
-
-    ollama_perform_stream(
-      request = request,
-      parser = parser
-    )
-
-    last_line <- parser$lines[[length(parser$lines)]]
-
-    last_line$message <- list(
-      role = "assistant",
-      content = parser$value
-    )
-
-    last_line
-  } else {
-    request |>
-      req_perform() |>
-      resp_body_json()
-  }
-}
-
-OllamaStreamParser <- R6::R6Class( # nolint
-  classname = "OllamaStreamParser",
-  portable = TRUE,
-  public = list(
-    lines = NULL,
-    value = NULL,
-    shinySession = NULL,
-    user_message = NULL,
-    append_parsed_line = function(line) {
-      self$value <- paste0(self$value, line$message$content)
-      self$lines <- c(self$lines, list(line))
-
-      if (!is.null(self$shinySession)) {
-        # any communication with JS should be handled here!!
-        self$shinySession$sendCustomMessage(
-          type = "render-stream",
-          message = list(
-            user = self$user_message,
-            assistant = shiny::markdown(self$value)
-          )
-        )
-      }
-
-      invisible(self)
-    },
-    parse_ndjson = function(ndjson, pagesize = 500, verbose = FALSE, simplifyDataFrame = FALSE) { # nolint
-      jsonlite::stream_in(
-        con = textConnection(ndjson),
-        pagesize = pagesize,
-        verbose = verbose,
-        simplifyDataFrame = simplifyDataFrame,
-        handler = function(x) lapply(x, self$append_parsed_line)
-      )
-
-      invisible(self)
-    },
-    initialize = function(session = NULL, user_prompt = NULL) {
-      self$lines <- list()
-      self$shinySession <- session
-      self$user_message <- shiny::markdown(user_prompt)
-    }
-  )
-)
diff --git a/R/service-openai.R b/R/service-openai.R
new file mode 100644
index 00000000..80d54c2d
--- /dev/null
+++ b/R/service-openai.R
@@ -0,0 +1,163 @@
+#' Generate text completions using OpenAI's chat API
+#'
+#' @param model The model to use for generating text
+#' @param prompt The prompt for generating completions
+#' @param api_key The API key for accessing OpenAI's API. By default, the
+#' function will try to use the `OPENAI_API_KEY` environment variable.
+#' @param task The task that specifies the API url to use. Defaults to
+#' "chat/completions", the endpoint required for chat models.
+#' @param stream Whether to stream the response, defaults to FALSE.
+#' @param shiny_session A Shiny session object to send messages to the client
+#' @param user_prompt A user prompt to send to the client
+#'
+#' @return The generated completion as a character string. When streaming, the
+#' accumulated completion is returned invisibly.
+#' @examples
+#' \dontrun{
+#' create_chat_openai(
+#'   model = "gpt-4o",
+#'   prompt = list(list(role = "user", content = "Hello world!"))
+#' ) +#' } +#' @export +create_chat_openai <- function(prompt = list(list(role = "user", content = "Hello")), + model = "gpt-4o", + api_key = Sys.getenv("OPENAI_API_KEY"), + task = "chat/completions", + stream = FALSE, + shiny_session = NULL, + user_prompt = NULL) { + request_body <- list( + messages = prompt, + model = model, + stream = stream + ) |> purrr::compact() + + query_api_openai(task = task, + request_body = request_body, + api_key = api_key, + stream = stream, + shiny_session = shiny_session, + user_prompt = user_prompt) +} + + +request_base_openai <- function(task, api_key = Sys.getenv("OPENAI_API_KEY")) { + if (!task %in% get_available_endpoints()) { + cli::cli_abort(message = c( + "{.var task} must be a supported endpoint", + "i" = "Run {.run gptstudio::get_available_endpoints()} to get a list of supported endpoints" + )) + } + request(getOption("gptstudio.openai_url")) |> + req_url_path_append(task) |> + req_auth_bearer_token(token = api_key) +} + +query_api_openai <- function(task, + request_body, + api_key = Sys.getenv("OPENAI_API_KEY"), + stream = FALSE, + shiny_session = NULL, + user_prompt = NULL) { + req <- request_base_openai(task, api_key = api_key) |> + req_body_json(data = request_body) |> + req_retry(max_tries = 3) |> + req_error(is_error = function(resp) FALSE) + + if (is_true(stream)) { + resp <- req |> req_perform_connection(mode = "text") + on.exit(close(resp)) + results <- list() + repeat({ + event <- resp_stream_sse(resp) + if (is.null(event) || event$data == "[DONE]") { + break + } + json <- jsonlite::parse_json(event$data) + results <- merge_dicts(results, json) + if (!is.null(shiny_session)) { + # any communication with JS should be handled here!! + shiny_session$sendCustomMessage( + type = "render-stream", + message = list( + user = user_prompt, + assistant = shiny::markdown(results$choices[[1]]$delta$content) + ) + ) + } else { + cat(json$choices[[1]]$delta$content) + } + }) + invisible(results$choices[[1]]$delta$content) + } else { + resp <- req |> req_perform() + if (resp_is_error(resp)) { + status <- resp_status(resp) + description <- resp_status_desc(resp) + + # nolint start + cli::cli_abort(c( + "x" = "OpenAI API request failed. Error {status} - {description}", + "i" = "Visit the {.href [OpenAi Error code guidance](https://help.openai.com/en/articles/6891839-api-error-code-guidance)} for more details", + "i" = "You can also visit the {.href [API documentation](https://platform.openai.com/docs/guides/error-codes/api-errors)}" + )) + # nolint end + } + results <- resp |> resp_body_json() + results$choices[[1]]$message$content + } +} + +#' List supported endpoints +#' +#' Get a list of the endpoints supported by gptstudio. 
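+#' Each endpoint is appended to \code{getOption("gptstudio.openai_url")} by
+#' \code{request_base_openai()} when building a request.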
+#' +#' @return A character vector +#' @export +#' +#' @examples +#' get_available_endpoints() +get_available_endpoints <- function() { + c("completions", "chat/completions", "edits", "embeddings", "models") +} + +encode_image <- function(image_path) { + image_file <- file(image_path, "rb") + image_data <- readBin(image_file, "raw", file.info(image_path)$size) + close(image_file) + base64_image <- jsonlite::base64_enc(image_data) + paste0("data:image/jpeg;base64,", base64_image) +} + +create_image_chat_openai <- function(image_path, + prompt = "What is this image?", + model = getOption("gptstudio.model"), + api_key = Sys.getenv("OPENAI_API_KEY"), + task = "chat/completions") { + image_data <- encode_image(image_path) + body <- list( + model = model, + messages = + list( + list( + role = "user", + content = list( + list( + type = "text", + text = prompt + ), + list( + type = "image_url", + image_url = list(url = image_data) + ) + ) + ) + ) + ) + query_api_openai( + task = task, + request_body = body, + api_key = api_key + ) +} diff --git a/R/service-openai_api_calls.R b/R/service-openai_api_calls.R deleted file mode 100644 index f14f4f47..00000000 --- a/R/service-openai_api_calls.R +++ /dev/null @@ -1,154 +0,0 @@ -#' Base for a request to the OPENAI API -#' -#' This function sends a request to a specific OpenAI API \code{task} endpoint at -#' the base URL \code{https://api.openai.com/v1}, and authenticates with -#' an API key using a Bearer token. -#' -#' @param task character string specifying an OpenAI API endpoint task -#' @param token String containing an OpenAI API key. Defaults to the OPENAI_API_KEY -#' environmental variable if not specified. -#' @return An httr2 request object -request_base <- function(task, token = Sys.getenv("OPENAI_API_KEY")) { - if (!task %in% get_available_endpoints()) { - cli::cli_abort(message = c( - "{.var task} must be a supported endpoint", - "i" = "Run {.run gptstudio::get_available_endpoints()} to get a list of supported endpoints" - )) - } - request(getOption("gptstudio.openai_url")) |> - req_url_path_append(task) |> - req_auth_bearer_token(token = token) -} - -#' Generate text completions using OpenAI's API for Chat -#' -#' @param model The model to use for generating text -#' @param prompt The prompt for generating completions -#' @param openai_api_key The API key for accessing OpenAI's API. By default, the -#' function will try to use the `OPENAI_API_KEY` environment variable. -#' @param task The task that specifies the API url to use, defaults to -#' "completions" and "chat/completions" is required for ChatGPT model. -#' -#' @return A list with the generated completions and other information returned -#' by the API. -#' @examples -#' \dontrun{ -#' openai_create_completion( -#' model = "text-davinci-002", -#' prompt = "Hello world!" -#' ) -#' } -#' @export -openai_create_chat_completion <- - function(prompt = "<|endoftext|>", - model = getOption("gptstudio.model"), - openai_api_key = Sys.getenv("OPENAI_API_KEY"), - task = "chat/completions") { - if (is_string(prompt)) { - prompt <- list( - list( - role = "user", - content = prompt - ) - ) - } - - body <- list( - model = model, - messages = prompt - ) - - query_api_openai(task = task, request_body = body, openai_api_key = openai_api_key) - } - - -#' A function that sends a request to the OpenAI API and returns the response. -#' -#' @param task A character string that specifies the task to send to the API. -#' @param request_body A list that contains the parameters for the task. 
-#' @param openai_api_key String containing an OpenAI API key. Defaults to the OPENAI_API_KEY -#' environmental variable if not specified. -#' -#' @return The response from the API. -#' -query_api_openai <- function(task, request_body, openai_api_key = Sys.getenv("OPENAI_API_KEY")) { - response <- request_base(task, token = openai_api_key) |> - req_body_json(data = request_body) |> - req_retry(max_tries = 3) |> - req_error(is_error = function(resp) FALSE) |> - req_perform() - - # error handling - if (resp_is_error(response)) { - status <- resp_status(response) # nolint - description <- resp_status_desc(response) # nolint - - # nolint start - cli::cli_abort(message = c( - "x" = "OpenAI API request failed. Error {status} - {description}", - "i" = "Visit the {.href [OpenAi Error code guidance](https://help.openai.com/en/articles/6891839-api-error-code-guidance)} for more details", - "i" = "You can also visit the {.href [API documentation](https://platform.openai.com/docs/guides/error-codes/api-errors)}" - )) - # nolint end - } - - response |> - resp_body_json() -} - -#' List supported endpoints -#' -#' Get a list of the endpoints supported by gptstudio. -#' -#' @return A character vector -#' @export -#' -#' @examples -#' get_available_endpoints() -get_available_endpoints <- function() { - c("completions", "chat/completions", "edits", "embeddings", "models") -} - -#' Encode an image file to base64 -#' -#' @param image_path String containing the path to the image file -#' @return A base64 encoded string of the image -encode_image <- function(image_path) { - image_file <- file(image_path, "rb") - image_data <- readBin(image_file, "raw", file.info(image_path)$size) - close(image_file) - base64_image <- jsonlite::base64_enc(image_data) - paste0("data:image/jpeg;base64,", base64_image) -} - -create_image_chat_openai <- function(image_path, - prompt = "What is this image?", - model = getOption("gptstudio.model"), - openai_api_key = Sys.getenv("OPENAI_API_KEY"), - task = "chat/completions") { - image_data <- encode_image(image_path) - body <- list( - model = model, - messages = - list( - list( - role = "user", - content = list( - list( - type = "text", - text = prompt - ), - list( - type = "image_url", - image_url = list(url = image_data) - ) - ) - ) - ) - ) - query_api_openai( - task = task, - request_body = body, - openai_api_key = openai_api_key - ) -} diff --git a/R/service-openai_streaming.R b/R/service-openai_streaming.R deleted file mode 100644 index 03cf5449..00000000 --- a/R/service-openai_streaming.R +++ /dev/null @@ -1,131 +0,0 @@ -#' Stream Chat Completion -#' -#' `stream_chat_completion` sends the prepared chat completion request to the -#' OpenAI API and retrieves the streamed response. -#' -#' @param messages A list of messages in the conversation, -#' including the current user prompt (optional). -#' @param element_callback A callback function to handle each element -#' of the streamed response (optional). -#' @param model A character string specifying the model to use for chat completion. -#' The default model is "gpt-4o-mini". -#' @param openai_api_key A character string of the OpenAI API key. -#' By default, it is fetched from the "OPENAI_API_KEY" environment variable. -#' Please note that the OpenAI API key is sensitive information and should be -#' treated accordingly. 
-#' @return The same as `httr2::req_perform_stream` -stream_chat_completion <- - function(messages = list(list(role = "user", content = "Hi there!")), - element_callback = openai_handler, - model = "gpt-4o-mini", - openai_api_key = Sys.getenv("OPENAI_API_KEY")) { - url <- paste0(getOption("gptstudio.openai_url"), "/chat/completions") - - body <- list( - "model" = model, - "stream" = TRUE, - "messages" = messages - ) - - request(url) %>% - req_headers( - "Content-Type" = "application/json", - "Authorization" = paste0("Bearer ", openai_api_key) - ) %>% - req_body_json(body) %>% - req_perform_stream( - callback = function(x) { - element <- rawToChar(x) - element_callback(element) - TRUE - }, - round = "line", - buffer_kb = 0.01 - ) - } - -openai_handler <- function(x) { - lines <- stringr::str_split(x, "\n")[[1]] - lines <- lines[lines != ""] - lines <- stringr::str_replace_all(lines, "^data: ", "") - lines <- lines[lines != "[DONE]"] - if (length(lines) == 0) { - return() - } - json <- jsonlite::parse_json(lines) - if (!is.null(json$choices[[1]]$finish_reason)) { - return() - } else { - cat(json$choices[[1]]$delta$content) - } -} - -#' Stream handler for chat completions -#' -#' R6 class that allows to handle chat completions chunk by chunk. It also adds -#' methods to retrieve relevant data. This class DOES NOT make the request. -#' -#' Because `httr2::req_perform_stream` blocks the R console until the stream -#' finishes, this class can take a shiny session object to handle communication -#' with JS without recurring to a `shiny::observe` inside a module server. -#' -#' @param session The shiny session it will send the message to (optional). -#' @param user_prompt The prompt for the chat completion. Only to be displayed -#' in an HTML tag containing the prompt. (Optional). -#' @param parsed_event An already parsed server-sent event to append to the -#' events field. -#' @importFrom R6 R6Class -#' @importFrom jsonlite fromJSON -OpenaiStreamParser <- R6::R6Class( # nolint - classname = "OpenaiStreamParser", - inherit = SSEparser::SSEparser, - public = list( - #' @field shinySession Holds the `session` provided at initialization - shinySession = NULL, - #' @field user_prompt The `user_prompt` provided at initialization, - #' after being formatted with markdown. - user_prompt = NULL, - #' @field value The content of the stream. It updates constantly until the stream ends. - value = NULL, # this will be our buffer - #' @description Start a StreamHandler. Recommended to be assigned to the `stream_handler` name. - initialize = function(session = NULL, user_prompt = NULL) { - self$shinySession <- session - self$user_prompt <- user_prompt - self$value <- "" - super$initialize() - }, - - #' @description Overwrites `SSEparser$append_parsed_sse()` to be able to - #' send a custom message to a shiny session, escaping shiny's reactivity. - append_parsed_sse = function(parsed_event) { - # ----- here you can do whatever you want with the event data ----- - if (is.null(parsed_event$data) || parsed_event$data == "[DONE]") { - return() - } - - parsed_event$data <- jsonlite::fromJSON(parsed_event$data, - simplifyDataFrame = FALSE) - - if (length(parsed_event$data$choices) == 0) return() - - content <- parsed_event$data$choices[[1]]$delta$content - self$value <- paste0(self$value, content) - - if (!is.null(self$shinySession)) { - # any communication with JS should be handled here!! 
-        self$shinySession$sendCustomMessage(
-          type = "render-stream",
-          message = list(
-            user = self$user_prompt,
-            assistant = shiny::markdown(self$value)
-          )
-        )
-      }
-
-      # ----- END ----
-
-      self$events <- c(self$events, list(parsed_event))
-      invisible(self)
-    }
-  )
-)
diff --git a/R/utils.R b/R/utils.R
new file mode 100644
index 00000000..eba06364
--- /dev/null
+++ b/R/utils.R
@@ -0,0 +1,78 @@
+# Taken from https://github.com/hadley/elmer, which was translated from
+# https://github.com/langchain-ai/langchain/blob/master/libs/core/langchain_core/utils/_merge.py
+
+# Merge two named lists key by key: character values are concatenated,
+# nested lists are merged recursively, and NULL values are overwritten.
+merge_dicts <- function(left, right) {
+  for (right_k in names(right)) {
+    right_v <- right[[right_k]]
+    left_v <- left[[right_k]]
+
+    if (is.null(right_v)) {
+      left[right_k] <- list(NULL)
+    } else if (is.null(left_v)) {
+      left[[right_k]] <- right_v
+    } else if (identical(left_v, right_v)) {
+      next
+    } else if (is.character(left_v)) {
+      left[[right_k]] <- paste0(left_v, right_v)
+    } else if (is.list(left_v)) {
+      if (!is.null(names(right_v))) {
+        left[[right_k]] <- merge_dicts(left_v, right_v)
+      } else {
+        left[[right_k]] <- merge_lists(left_v, right_v)
+      }
+    } else if (!identical(class(left_v), class(right_v))) {
+      stop(paste0("additional_kwargs['", right_k, "'] already exists in this message, but with a different type."))
+    } else {
+      stop(paste0(
+        "Additional kwargs key ",
+        right_k,
+        " already exists in left dict and value has unsupported type ",
+        class(left[[right_k]]),
+        "."
+      ))
+    }
+  }
+
+  left
+}
+
+# Merge two unnamed lists element-wise, matching elements on their "index"
+# field when present (used to stitch streamed chunks back together).
+merge_lists <- function(left, right) {
+  if (is.null(right)) {
+    return(left)
+  } else if (is.null(left)) {
+    return(right)
+  }
+
+  for (e in right) {
+    idx <- find_index(left, e)
+    if (is.na(idx)) {
+      left <- c(left, list(e))
+    } else {
+      # If a top-level "type" has been set for a chunk, it should no
+      # longer be overridden by the "type" field in future chunks.
+      if (!is.null(left[[idx]]$type) && !is.null(e$type)) {
+        e$type <- NULL
+      }
+      left[[idx]] <- merge_dicts(left[[idx]], e)
+    }
+  }
+  left
+}
+
+# Find the position in `left` of the single element whose "index" matches
+# e_right$index; returns NA when there is no unique match.
+find_index <- function(left, e_right) {
+  if (!is.list(e_right) || !has_name(e_right, "index") || !is.numeric(e_right$index)) {
+    return(NA)
+  }
+
+  matches_idx <- map_lgl(left, function(e_left) e_left$index == e_right$index)
+  if (sum(matches_idx) != 1) {
+    return(NA)
+  }
+  which(matches_idx)[[1]]
+}
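For context, a minimal sketch of how these new merge helpers behave, using illustrative values that are not part of the diff:

# Two partial chunks of the kind produced by a streamed tool call.
chunk1 <- list(content = "Hel", tool_calls = list(list(index = 1, name = "get_time")))
chunk2 <- list(content = "lo", tool_calls = list(list(index = 1, arguments = "{}")))

merged <- merge_dicts(chunk1, chunk2)
# merged$content is "Hello"; the two index-1 tool call fragments are
# combined into a single entry by merge_lists()/find_index().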
diff --git a/man/OpenaiStreamParser.Rd b/man/OpenaiStreamParser.Rd
deleted file mode 100644
index 603de9bb..00000000
--- a/man/OpenaiStreamParser.Rd
+++ /dev/null
@@ -1,105 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-openai_streaming.R
-\name{OpenaiStreamParser}
-\alias{OpenaiStreamParser}
-\title{Stream handler for chat completions}
-\description{
-Stream handler for chat completions
-
-Stream handler for chat completions
-}
-\details{
-R6 class that allows to handle chat completions chunk by chunk. It also adds
-methods to retrieve relevant data. This class DOES NOT make the request.
-
-Because \code{httr2::req_perform_stream} blocks the R console until the stream
-finishes, this class can take a shiny session object to handle communication
-with JS without recurring to a \code{shiny::observe} inside a module server.
-}
-\section{Super class}{
-\code{\link[SSEparser:SSEparser]{SSEparser::SSEparser}} -> \code{OpenaiStreamParser}
-}
-\section{Public fields}{
-\if{html}{\out{<div class="r6-fields">}}
-\describe{
-\item{\code{shinySession}}{Holds the \code{session} provided at initialization}
-
-\item{\code{user_prompt}}{The \code{user_prompt} provided at initialization,
-after being formatted with markdown.}
-
-\item{\code{value}}{The content of the stream. It updates constantly until the stream ends.}
-}
-\if{html}{\out{</div>}}
-}
-\section{Methods}{
-\subsection{Public methods}{
-\itemize{
-\item \href{#method-OpenaiStreamParser-new}{\code{OpenaiStreamParser$new()}}
-\item \href{#method-OpenaiStreamParser-append_parsed_sse}{\code{OpenaiStreamParser$append_parsed_sse()}}
-\item \href{#method-OpenaiStreamParser-clone}{\code{OpenaiStreamParser$clone()}}
}
-}
-\if{html}{\out{
-<details open><summary>Inherited methods</summary>
-<ul>
-<li><span class="pkg-link" data-pkg="SSEparser" data-topic="SSEparser" data-id="parse_sse"><a href='../../SSEparser/html/SSEparser.html#method-SSEparser-parse_sse'><code>SSEparser::SSEparser$parse_sse()</code></a></span></li>
-</ul>
-</details>
-}}
-\if{html}{\out{<hr>}}
-\if{html}{\out{<a id="method-OpenaiStreamParser-new"></a>}}
-\if{latex}{\out{\hypertarget{method-OpenaiStreamParser-new}{}}}
-\subsection{Method \code{new()}}{
-Start a StreamHandler. Recommended to be assigned to the \code{stream_handler} name.
-\subsection{Usage}{
-\if{html}{\out{<div class="r">}}\preformatted{OpenaiStreamParser$new(session = NULL, user_prompt = NULL)}\if{html}{\out{</div>}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{<div class="arguments">}}
-\describe{
-\item{\code{session}}{The shiny session it will send the message to (optional).}
-
-\item{\code{user_prompt}}{The prompt for the chat completion. Only to be displayed
-in an HTML tag containing the prompt. (Optional).}
-}
-\if{html}{\out{</div>}}
-}
-}
-\if{html}{\out{<hr>}}
-\if{html}{\out{<a id="method-OpenaiStreamParser-append_parsed_sse"></a>}}
-\if{latex}{\out{\hypertarget{method-OpenaiStreamParser-append_parsed_sse}{}}}
-\subsection{Method \code{append_parsed_sse()}}{
-Overwrites \code{SSEparser$append_parsed_sse()} to be able to
-send a custom message to a shiny session, escaping shiny's reactivity.
-\subsection{Usage}{
-\if{html}{\out{<div class="r">}}\preformatted{OpenaiStreamParser$append_parsed_sse(parsed_event)}\if{html}{\out{</div>}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{<div class="arguments">}}
-\describe{
-\item{\code{parsed_event}}{An already parsed server-sent event to append to the
-events field.}
-}
-\if{html}{\out{</div>}}
-}
-}
-\if{html}{\out{<hr>}}
-\if{html}{\out{<a id="method-OpenaiStreamParser-clone"></a>}}
-\if{latex}{\out{\hypertarget{method-OpenaiStreamParser-clone}{}}}
-\subsection{Method \code{clone()}}{
-The objects of this class are cloneable with this method.
-\subsection{Usage}{
-\if{html}{\out{<div class="r">}}\preformatted{OpenaiStreamParser$clone(deep = FALSE)}\if{html}{\out{</div>}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{<div class="arguments">}}
-\describe{
-\item{\code{deep}}{Whether to make a deep clone.}
-}
-\if{html}{\out{</div>}}
-}
-}
-}
diff --git a/man/create_chat_azure_openai.Rd b/man/create_chat_azure_openai.Rd
new file mode 100644
index 00000000..498a547f
--- /dev/null
+++ b/man/create_chat_azure_openai.Rd
@@ -0,0 +1,49 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/service-azure_openai.R
+\name{create_chat_azure_openai}
+\alias{create_chat_azure_openai}
+\title{Generate text using Azure OpenAI's API}
+\usage{
+create_chat_azure_openai(
+  prompt = list(list(role = "user", content = "Hello")),
+  model = Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
+  api_key = Sys.getenv("AZURE_OPENAI_API_KEY"),
+  task = "chat/completions",
+  stream = FALSE,
+  shiny_session = NULL,
+  user_prompt = NULL,
+  base_url = Sys.getenv("AZURE_OPENAI_ENDPOINT"),
+  api_version = Sys.getenv("AZURE_OPENAI_API_VERSION")
+)
+}
+\arguments{
+\item{prompt}{A list of messages to use as the prompt for generating completions.
+Each message should be a list with 'role' and 'content' elements.}
+
+\item{model}{A character string for the model to use. Defaults to the Azure OpenAI
+deployment name from environment variables if not specified.}
+
+\item{api_key}{A character string for the API key. It will default to the Azure
+OpenAI API key from your environment variables if not specified.}
+
+\item{task}{A character string for the API task. Defaults to "chat/completions".}
+
+\item{stream}{Whether to stream the response, defaults to FALSE.}
+
+\item{shiny_session}{A Shiny session object to send messages to the client}
+
+\item{user_prompt}{A user prompt to send to the client}
+
+\item{base_url}{A character string for the base url. It defaults to the Azure
+OpenAI endpoint from environment variables if not specified.}
+
+\item{api_version}{A character string for the API version. It defaults to
+the Azure OpenAI API version from your environment variables if not specified.}
+}
+\value{
+A list with the generated completions and other information returned
+by the API
+}
+\description{
+Use this function to generate text completions using Azure OpenAI's API.
+}
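A minimal usage sketch for the function documented above (not part of the diff; assumes the AZURE_OPENAI_* environment variables are set, so the remaining arguments keep their documented defaults):

create_chat_azure_openai(
  prompt = list(list(role = "user", content = "Explain S3 dispatch in R"))
)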
diff --git a/man/create_completion_google.Rd b/man/create_chat_google.Rd
similarity index 60%
rename from man/create_completion_google.Rd
rename to man/create_chat_google.Rd
index 5154430d..613216cd 100644
--- a/man/create_completion_google.Rd
+++ b/man/create_chat_google.Rd
@@ -1,23 +1,23 @@
 % Generated by roxygen2: do not edit by hand
 % Please edit documentation in R/service-google.R
-\name{create_completion_google}
-\alias{create_completion_google}
+\name{create_chat_google}
+\alias{create_chat_google}
 \title{Generate text completions using Google AI Studio's API}
 \usage{
-create_completion_google(
-  prompt,
+create_chat_google(
+  prompt = list(list(role = "user", content = "tell me a joke")),
   model = "gemini-pro",
-  key = Sys.getenv("GOOGLE_API_KEY")
+  api_key = Sys.getenv("GOOGLE_API_KEY")
 )
 }
 \arguments{
 \item{prompt}{The prompt for generating completions}
 
-\item{model}{The model to use for generating text. By default, the
-function will try to use "text-bison-001"}
+\item{model}{The model to use for generating text. By default, the function
+will try to use "gemini-pro"}
 
-\item{key}{The API key for accessing Google AI Studio's API. By default, the
-function will try to use the \code{GOOGLE_API_KEY} environment variable.}
+\item{api_key}{The API key for accessing Google AI Studio's API. By default,
+the function will try to use the \code{GOOGLE_API_KEY} environment variable.}
 }
 \value{
 A list with the generated completions and other information returned
@@ -28,7 +28,7 @@ Generate text completions using Google AI Studio's API
 }
 \examples{
 \dontrun{
-create_completion_google(
+create_chat_google(
   prompt = "Write a story about a magic backpack",
   temperature = 1.0,
   candidate_count = 3
diff --git a/man/create_chat_ollama.Rd b/man/create_chat_ollama.Rd
new file mode 100644
index 00000000..e31def2d
--- /dev/null
+++ b/man/create_chat_ollama.Rd
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/service-ollama.R
+\name{create_chat_ollama}
+\alias{create_chat_ollama}
+\title{Generate text using Ollama's API}
+\usage{
+create_chat_ollama(
+  prompt = list(list(role = "user", content = "Hello")),
+  model = "llama3.1:latest",
+  api_url = Sys.getenv("OLLAMA_HOST", "http://localhost:11434"),
+  stream = FALSE,
+  shiny_session = NULL,
+  user_prompt = NULL
+)
+}
+\arguments{
+\item{prompt}{A list of messages to use as the prompt for generating completions.
+Each message should be a list with 'role' and 'content' elements.}
+
+\item{model}{A character string for the model to use.}
+
+\item{api_url}{A character string for the API url. It defaults to the Ollama
+host from environment variables or "http://localhost:11434" if not specified.}
+
+\item{stream}{Whether to stream the response, defaults to FALSE.}
+
+\item{shiny_session}{A Shiny session object to send messages to the client}
+
+\item{user_prompt}{A user prompt to send to the client}
+}
+\value{
+The generated completion as a character string, or the full response if streaming.
+}
+\description{
+Use this function to generate text completions using Ollama's API.
+}
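A usage sketch for the documented defaults (not part of the diff; assumes a local Ollama server is reachable at http://localhost:11434 and the model has been pulled):

create_chat_ollama(
  prompt = list(list(role = "user", content = "What does |> do in R?")),
  model = "llama3.1:latest"
)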
By default, the function will try to use the \code{OPENAI_API_KEY} environment variable.} \item{task}{The task that specifies the API url to use, defaults to "completions" and "chat/completions" is required for ChatGPT model.} + +\item{stream}{Whether to stream the response, defaults to FALSE.} + +\item{shiny_session}{A Shiny session object to send messages to the client} + +\item{user_prompt}{A user prompt to send to the client} } \value{ A list with the generated completions and other information returned @@ -32,7 +41,7 @@ Generate text completions using OpenAI's API for Chat \examples{ \dontrun{ openai_create_completion( - model = "text-davinci-002", + model = "gpt-4o", prompt = "Hello world!" ) } diff --git a/man/create_completion_anthropic.Rd b/man/create_completion_anthropic.Rd index 582b9d76..b196ec02 100644 --- a/man/create_completion_anthropic.Rd +++ b/man/create_completion_anthropic.Rd @@ -6,17 +6,18 @@ \usage{ create_completion_anthropic( prompt = list(list(role = "user", content = "Hello")), - system = NULL, model = "claude-3-5-sonnet-20240620", max_tokens = 1028, - key = Sys.getenv("ANTHROPIC_API_KEY") + key = Sys.getenv("ANTHROPIC_API_KEY"), + stream = FALSE, + system = NULL, + shiny_session = NULL, + user_prompt = NULL ) } \arguments{ \item{prompt}{The prompt for generating completions} -\item{system}{A system messages to instruct the model. Defaults to NULL.} - \item{model}{The model to use for generating text. By default, the function will try to use "claude-2.1".} @@ -24,6 +25,14 @@ function will try to use "claude-2.1".} \item{key}{The API key for accessing Anthropic's API. By default, the function will try to use the \code{ANTHROPIC_API_KEY} environment variable.} + +\item{stream}{Whether to stream the response, defaults to FALSE.} + +\item{system}{A system messages to instruct the model. Defaults to NULL.} + +\item{shiny_session}{A Shiny session object to send messages to the client} + +\item{user_prompt}{A user prompt to send to the client} } \value{ A list with the generated completions and other information returned @@ -35,7 +44,7 @@ Generate text completions using Anthropic's API \examples{ \dontrun{ create_completion_anthropic( - prompt = "\n\nHuman: Hello, world!\n\nAssistant:", + prompt = list(list(role = "user", content = "Hello")), model = "claude-3-haiku-20240307", max_tokens = 1028 ) diff --git a/man/create_completion_azure_openai.Rd b/man/create_completion_azure_openai.Rd deleted file mode 100644 index 100c43cb..00000000 --- a/man/create_completion_azure_openai.Rd +++ /dev/null @@ -1,45 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/service-azure_openai.R -\name{create_completion_azure_openai} -\alias{create_completion_azure_openai} -\title{Generate text using Azure OpenAI's API} -\usage{ -create_completion_azure_openai( - prompt, - task = Sys.getenv("AZURE_OPENAI_TASK"), - base_url = Sys.getenv("AZURE_OPENAI_ENDPOINT"), - deployment_name = Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), - api_key = Sys.getenv("AZURE_OPENAI_API_KEY"), - api_version = Sys.getenv("AZURE_OPENAI_API_VERSION") -) -} -\arguments{ -\item{prompt}{a list to use as the prompt for generating -completions} - -\item{task}{a character string for the API task (e.g. "completions"). -Defaults to the Azure OpenAI -task from environment variables if not specified.} - -\item{base_url}{a character string for the base url. 
diff --git a/man/create_completion_anthropic.Rd b/man/create_completion_anthropic.Rd
index 582b9d76..b196ec02 100644
--- a/man/create_completion_anthropic.Rd
+++ b/man/create_completion_anthropic.Rd
@@ -6,17 +6,18 @@
 \usage{
 create_completion_anthropic(
   prompt = list(list(role = "user", content = "Hello")),
-  system = NULL,
   model = "claude-3-5-sonnet-20240620",
   max_tokens = 1028,
-  key = Sys.getenv("ANTHROPIC_API_KEY")
+  key = Sys.getenv("ANTHROPIC_API_KEY"),
+  stream = FALSE,
+  system = NULL,
+  shiny_session = NULL,
+  user_prompt = NULL
 )
 }
 \arguments{
 \item{prompt}{The prompt for generating completions}
 
-\item{system}{A system messages to instruct the model. Defaults to NULL.}
-
 \item{model}{The model to use for generating text. By default, the
-function will try to use "claude-2.1".}
+function will try to use "claude-3-5-sonnet-20240620".}
@@ -24,6 +25,14 @@ function will try to use "claude-2.1".}
 
 \item{key}{The API key for accessing Anthropic's API. By default, the
 function will try to use the \code{ANTHROPIC_API_KEY} environment variable.}
+
+\item{stream}{Whether to stream the response, defaults to FALSE.}
+
+\item{system}{A system message to instruct the model. Defaults to NULL.}
+
+\item{shiny_session}{A Shiny session object to send messages to the client}
+
+\item{user_prompt}{A user prompt to send to the client}
 }
 \value{
 A list with the generated completions and other information returned
@@ -35,7 +44,7 @@ Generate text completions using Anthropic's API
 }
 \examples{
 \dontrun{
 create_completion_anthropic(
-  prompt = "\n\nHuman: Hello, world!\n\nAssistant:",
+  prompt = list(list(role = "user", content = "Hello")),
   model = "claude-3-haiku-20240307",
   max_tokens = 1028
 )
 }
diff --git a/man/create_completion_azure_openai.Rd b/man/create_completion_azure_openai.Rd
deleted file mode 100644
index 100c43cb..00000000
--- a/man/create_completion_azure_openai.Rd
+++ /dev/null
@@ -1,45 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-azure_openai.R
-\name{create_completion_azure_openai}
-\alias{create_completion_azure_openai}
-\title{Generate text using Azure OpenAI's API}
-\usage{
-create_completion_azure_openai(
-  prompt,
-  task = Sys.getenv("AZURE_OPENAI_TASK"),
-  base_url = Sys.getenv("AZURE_OPENAI_ENDPOINT"),
-  deployment_name = Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
-  api_key = Sys.getenv("AZURE_OPENAI_API_KEY"),
-  api_version = Sys.getenv("AZURE_OPENAI_API_VERSION")
-)
-}
-\arguments{
-\item{prompt}{a list to use as the prompt for generating
-completions}
-
-\item{task}{a character string for the API task (e.g. "completions").
-Defaults to the Azure OpenAI
-task from environment variables if not specified.}
-
-\item{base_url}{a character string for the base url. It defaults to the Azure
-OpenAI endpoint from environment variables if not specified.}
-
-\item{deployment_name}{a character string for the deployment name. It will
-default to the Azure OpenAI deployment name from environment variables if
-not specified.}
-
-\item{api_key}{a character string for the API key. It will default to the Azure
-OpenAI API key from your environment variables if not specified.}
-
-\item{api_version}{a character string for the API version. It will default to
-the Azure OpenAI API version from your environment variables if not
-specified.}
-}
-\value{
-a list with the generated completions and other information returned
-by the API
-}
-\description{
-Use this function to generate text completions using OpenAI's
-API.
-}
diff --git a/man/encode_image.Rd b/man/encode_image.Rd
deleted file mode 100644
index bf55f48d..00000000
--- a/man/encode_image.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-openai_api_calls.R
-\name{encode_image}
-\alias{encode_image}
-\title{Encode an image file to base64}
-\usage{
-encode_image(image_path)
-}
-\arguments{
-\item{image_path}{String containing the path to the image file}
-}
-\value{
-A base64 encoded string of the image
-}
-\description{
-Encode an image file to base64
-}
diff --git a/man/get_available_endpoints.Rd b/man/get_available_endpoints.Rd
index d3d881a0..a5f8720a 100644
--- a/man/get_available_endpoints.Rd
+++ b/man/get_available_endpoints.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-openai_api_calls.R
+% Please edit documentation in R/service-openai.R
 \name{get_available_endpoints}
 \alias{get_available_endpoints}
 \title{List supported endpoints}
diff --git a/man/query_api_anthropic.Rd b/man/query_api_anthropic.Rd
deleted file mode 100644
index 6753b8de..00000000
--- a/man/query_api_anthropic.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-anthropic.R
-\name{query_api_anthropic}
-\alias{query_api_anthropic}
-\title{A function that sends a request to the Anthropic API and returns the
-response.}
-\usage{
-query_api_anthropic(request_body, key = Sys.getenv("ANTHROPIC_API_KEY"))
-}
-\arguments{
-\item{request_body}{A list that contains the parameters for the task.}
-
-\item{key}{String containing an Anthropic API key. Defaults
-to the ANTHROPIC_API_KEY environmental variable if not specified.}
-}
-\value{
-The response from the API.
-}
-\description{
-A function that sends a request to the Anthropic API and returns the
-response.
-}
diff --git a/man/query_api_google.Rd b/man/query_api_google.Rd
deleted file mode 100644
index 6ca0552e..00000000
--- a/man/query_api_google.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-google.R
-\name{query_api_google}
-\alias{query_api_google}
-\title{A function that sends a request to the Google AI Studio API and returns the
-response.}
-\usage{
-query_api_google(model, request_body, key = Sys.getenv("GOOGLE_API_KEY"))
-}
-\arguments{
-\item{model}{A character string that specifies the model to send to the API.}
-
-\item{request_body}{A list that contains the parameters for the task.}
-
-\item{key}{String containing a Google AI Studio API key. Defaults
-to the GOOGLE_API_KEY environmental variable if not specified.}
-}
-\value{
-The response from the API.
-}
-\description{
-A function that sends a request to the Google AI Studio API and returns the
-response.
-}
diff --git a/man/query_api_openai.Rd b/man/query_api_openai.Rd
deleted file mode 100644
index a83e31df..00000000
--- a/man/query_api_openai.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-openai_api_calls.R
-\name{query_api_openai}
-\alias{query_api_openai}
-\title{A function that sends a request to the OpenAI API and returns the response.}
-\usage{
-query_api_openai(
-  task,
-  request_body,
-  openai_api_key = Sys.getenv("OPENAI_API_KEY")
-)
-}
-\arguments{
-\item{task}{A character string that specifies the task to send to the API.}
-
-\item{request_body}{A list that contains the parameters for the task.}
-
-\item{openai_api_key}{String containing an OpenAI API key. Defaults to the OPENAI_API_KEY
-environmental variable if not specified.}
-}
-\value{
-The response from the API.
-}
-\description{
-A function that sends a request to the OpenAI API and returns the response.
-}
diff --git a/man/request_base.Rd b/man/request_base.Rd
deleted file mode 100644
index 739dac25..00000000
--- a/man/request_base.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-openai_api_calls.R
-\name{request_base}
-\alias{request_base}
-\title{Base for a request to the OPENAI API}
-\usage{
-request_base(task, token = Sys.getenv("OPENAI_API_KEY"))
-}
-\arguments{
-\item{task}{character string specifying an OpenAI API endpoint task}
-
-\item{token}{String containing an OpenAI API key. Defaults to the OPENAI_API_KEY
-environmental variable if not specified.}
-}
-\value{
-An httr2 request object
-}
-\description{
-This function sends a request to a specific OpenAI API \code{task} endpoint at
-the base URL \code{https://api.openai.com/v1}, and authenticates with
-an API key using a Bearer token.
-}
diff --git a/man/request_base_anthropic.Rd b/man/request_base_anthropic.Rd
deleted file mode 100644
index c4acced9..00000000
--- a/man/request_base_anthropic.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-anthropic.R
-\name{request_base_anthropic}
-\alias{request_base_anthropic}
-\title{Base for a request to the Anthropic API}
-\usage{
-request_base_anthropic(key = Sys.getenv("ANTHROPIC_API_KEY"))
-}
-\arguments{
-\item{key}{String containing an Anthropic API key. Defaults to the
-ANTHROPIC_API_KEY environmental variable if not specified.}
-}
-\value{
-An httr2 request object
-}
-\description{
-This function sends a request to the Anthropic API endpoint and
-authenticates with an API key.
-}
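For orientation, what the deleted request_base() helper described, written as plain httr2 calls (illustrative only; the helper itself is removed from the package):

library(httr2)

req <- request("https://api.openai.com/v1") |>
  req_url_path_append("chat/completions") |>
  req_auth_bearer_token(Sys.getenv("OPENAI_API_KEY"))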
diff --git a/man/request_base_google.Rd b/man/request_base_google.Rd
deleted file mode 100644
index 723de609..00000000
--- a/man/request_base_google.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-google.R
-\name{request_base_google}
-\alias{request_base_google}
-\title{Base for a request to the Google AI Studio API}
-\usage{
-request_base_google(model, key = Sys.getenv("GOOGLE_API_KEY"))
-}
-\arguments{
-\item{model}{character string specifying a Google AI Studio API model}
-
-\item{key}{String containing a Google AI Studio API key. Defaults to the
-GOOGLE_API_KEY environmental variable if not specified.}
-}
-\value{
-An httr2 request object
-}
-\description{
-This function sends a request to a specific Google AI Studio API endpoint and
-authenticates with an API key.
-}
diff --git a/man/stream_chat_completion.Rd b/man/stream_chat_completion.Rd
deleted file mode 100644
index 25ed5736..00000000
--- a/man/stream_chat_completion.Rd
+++ /dev/null
@@ -1,35 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/service-openai_streaming.R
-\name{stream_chat_completion}
-\alias{stream_chat_completion}
-\title{Stream Chat Completion}
-\usage{
-stream_chat_completion(
-  messages = list(list(role = "user", content = "Hi there!")),
-  element_callback = openai_handler,
-  model = "gpt-4o-mini",
-  openai_api_key = Sys.getenv("OPENAI_API_KEY")
-)
-}
-\arguments{
-\item{messages}{A list of messages in the conversation,
-including the current user prompt (optional).}
-
-\item{element_callback}{A callback function to handle each element
-of the streamed response (optional).}
-
-\item{model}{A character string specifying the model to use for chat completion.
-The default model is "gpt-4o-mini".}
-
-\item{openai_api_key}{A character string of the OpenAI API key.
-By default, it is fetched from the "OPENAI_API_KEY" environment variable.
-Please note that the OpenAI API key is sensitive information and should be
-treated accordingly.}
-}
-\value{
-The same as \code{httr2::req_perform_stream}
-}
-\description{
-\code{stream_chat_completion} sends the prepared chat completion request to the
-OpenAI API and retrieves the streamed response.
-}
diff --git a/tests/testthat/_snaps/api_skeletons.md b/tests/testthat/_snaps/api_skeletons.md
index 0b2fa2d4..18468964 100644
--- a/tests/testthat/_snaps/api_skeletons.md
+++ b/tests/testthat/_snaps/api_skeletons.md
@@ -1,4 +1,4 @@
-# multiplication works
+# create skeletons works
 
     Code
      gptstudio_create_skeleton()
@@ -62,7 +62,7 @@
 
    $stream
-    [1] FALSE
+    [1] TRUE
 
    $extras
    list()
 
diff --git a/tests/testthat/test-api_skeletons.R b/tests/testthat/test-api_skeletons.R
index 7f5f0b06..3d55c954 100644
--- a/tests/testthat/test-api_skeletons.R
+++ b/tests/testthat/test-api_skeletons.R
@@ -10,7 +10,7 @@ withr::local_envvar(
   )
 )
 
-test_that("multiplication works", {
+test_that("create skeletons works", {
   config <- yaml::read_yaml(system.file("rstudio/config.yml",
     package = "gptstudio"
  ))
@@ -78,7 +78,7 @@ test_that("gptstudio_create_skeleton creates correct skeleton for Anthropic", {
   expect_s3_class(skeleton, "gptstudio_request_anthropic")
   expect_equal(skeleton$model, "claude-3-5-sonnet-20240620")
   expect_equal(skeleton$prompt, "What is R?")
-  expect_false(skeleton$stream)
+  expect_true(skeleton$stream)
 })
 
 test_that("gptstudio_create_skeleton creates correct skeleton for Cohere", {
diff --git a/tests/testthat/test-gpt_api_calls.R b/tests/testthat/test-gpt_api_calls.R
index 3a0c3648..10a83627 100644
--- a/tests/testthat/test-gpt_api_calls.R
+++ b/tests/testthat/test-gpt_api_calls.R
@@ -29,7 +29,7 @@ test_that("OpenAI create edit fails with bad key", {
 test_that("OpenAI create chat completion fails with bad key", {
   expect_error(
-    openai_create_chat_completion(
+    create_chat_openai(
       prompt = "What is your name?",
-      openai_api_key = sample_key
+      api_key = sample_key
     )
   )
-test_that("create_completion_azure_openai formats request correctly", { - mock_query_api <- function(task, request_body, base_url, deployment_name, - api_key, api_version) { - list(choices = list(list(message = list(content = "Mocked response")))) - } - - withr::with_envvar( - new = c( - AZURE_OPENAI_TASK = "env_task", - AZURE_OPENAI_ENDPOINT = "https://env.openai.azure.com", - AZURE_OPENAI_DEPLOYMENT_NAME = "env_deployment", - AZURE_OPENAI_API_KEY = "env_token", - AZURE_OPENAI_API_VERSION = "env_version" - ), - { - local_mocked_bindings( - query_api_azure_openai = mock_query_api - ) - - result <- create_completion_azure_openai("Test prompt") - - expect_type(result, "list") - expect_equal(result$choices[[1]]$message$content, "Mocked response") - } - ) -}) - test_that("request_base_azure_openai constructs correct request", { mock_request <- function(url) { structure(list(url = url, headers = list()), class = "httr2_request") @@ -75,81 +48,6 @@ test_that("request_base_azure_openai constructs correct request", { ) }) -test_that("query_api_azure_openai handles successful response", { - mock_request_base <- function(...) { - structure(list(url = "https://test.openai.azure.com", headers = list()), - class = "httr2_request" - ) - } - - mock_req_perform <- function(req) { - structure(list(status_code = 200, body = '{"result": "success"}'), - class = "httr2_response" - ) - } - - mock_resp_body_json <- function(resp) list(result = "success") - - local_mocked_bindings( - request_base_azure_openai = mock_request_base, - req_body_json = function(req, body) req, - req_retry = function(req, max_tries) req, - req_error = function(req, is_error) req, - req_perform = mock_req_perform, - resp_is_error = function(resp) FALSE, - resp_body_json = mock_resp_body_json - ) - - result <- query_api_azure_openai( - task = "test_task", - request_body = list(list(role = "user", content = "Test prompt")), - base_url = "https://test.openai.azure.com", - deployment_name = "test_deployment", - api_key = "test_token", - api_version = "test_version" - ) - - expect_type(result, "list") - expect_equal(result$result, "success") -}) - -test_that("query_api_azure_openai handles error response", { - mock_request_base <- function(...) { - structure(list(url = "https://test.openai.azure.com", headers = list()), - class = "httr2_request" - ) - } - - mock_req_perform <- function(req) { - structure(list(status_code = 400, body = '{"error": "Bad Request"}'), - class = "httr2_response" - ) - } - - local_mocked_bindings( - request_base_azure_openai = mock_request_base, - req_body_json = function(req, body) req, - req_retry = function(req, max_tries) req, - req_error = function(req, is_error) req, - req_perform = mock_req_perform, - resp_is_error = function(resp) TRUE, - resp_status = function(resp) 400, - resp_status_desc = function(resp) "Bad Request" - ) - - expect_error( - query_api_azure_openai( - task = "test_task", - request_body = list(list(role = "user", content = "Test prompt")), - base_url = "https://test.openai.azure.com", - deployment_name = "test_deployment", - api_key = "test_token", - api_version = "test_version" - ), - "Azure OpenAI API request failed. 
Error 400 - Bad Request" - ) -}) - # Test token retrieval -------------------------------------------------------- test_that("retrieve_azure_token successfully gets existing token", { diff --git a/tests/testthat/test-service-openai_streaming.R b/tests/testthat/test-service-openai_streaming.R index f81f3f8f..e69de29b 100644 --- a/tests/testthat/test-service-openai_streaming.R +++ b/tests/testthat/test-service-openai_streaming.R @@ -1,18 +0,0 @@ -test_that("OpenaiStreamParser works with different kinds of data values", { - openai_parser <- function(sse) { - parser <- OpenaiStreamParser$new() - parser$parse_sse(sse) - - parser$events - } - - event1 <- "data: []" - event2 <- paste0("data: ", jsonlite::toJSON(chat_message_default())) - event3 <- "message: data is empty here" - event4 <- "data : [DONE]" - - expect_type(openai_parser(event1), "list") - expect_type(openai_parser(event2), "list") - expect_type(openai_parser(event3), "list") - expect_type(openai_parser(event4), "list") -})