From b1307e6f613016f84c5585363787069e3fdc50a6 Mon Sep 17 00:00:00 2001
From: Edgar Ruiz
Date: Wed, 3 Apr 2024 15:49:49 -0500
Subject: [PATCH] Adds details and examples to ch_submit()

---
 R/ch-submit.R    | 46 ++++++++++++++++++++++++++++++++++++++++-
 man/ch_submit.Rd | 53 ++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 96 insertions(+), 3 deletions(-)

diff --git a/R/ch-submit.R b/R/ch-submit.R
index 28f7585..00a6116 100644
--- a/R/ch-submit.R
+++ b/R/ch-submit.R
@@ -1,4 +1,4 @@
-#' Method to easily integrate to new LLM's
+#' Method to easily integrate with new LLM APIs
 #' @param defaults Defaults object, generally pulled from `chattr_defaults()`
 #' @param prompt The prompt to send to the LLM
 #' @param stream To output the response from the LLM as it happens, or wait until
@@ -10,6 +10,50 @@
 #' prompt (TRUE)
 #' @param ... Optional arguments; currently unused.
 #' @keywords internal
+#' @details Use this function to integrate your own LLM API with chattr. A few
+#' requirements need to be met for it to work properly:
+#' * The output of the function needs to be the parsed response from the LLM.
+#' * For APIs that support streaming, use `cat()` to output the response from
+#' the LLM API as it arrives.
+#' * If `preview` is set to `TRUE`, do not send anything to the LLM API;
+#' simply return the resulting prompt.
+#'
+#' The `defaults` argument controls which method is dispatched. Use
+#' `chattr_defaults()` to set the provider. The `provider` value determines
+#' the R class name: spaces become underscores and `ch_` is prepended, so
+#' provider "my llm" dispatches to `ch_submit.ch_my_llm()`. See the example below.
+#' @examples
+#' \dontrun{
+#' library(chattr)
+#' ch_submit.ch_my_llm <- function(defaults,
+#'                                 prompt = NULL,
+#'                                 stream = NULL,
+#'                                 prompt_build = TRUE,
+#'                                 preview = FALSE,
+#'                                 ...) {
+#'   # If `prompt_build` is TRUE, prepend your instructions to the prompt
+#'   if (prompt_build) prompt <- paste0("Use the tidyverse\n", prompt)
+#'   # If `preview` is TRUE, return the resulting prompt without calling the API
+#'   if (preview) return(prompt)
+#'   llm_response <- paste0("You said this: \n", prompt)
+#'   if (stream) {
+#'     cat("streaming:\n")
+#'     for (i in seq_len(nchar(llm_response))) {
+#'       # If `stream` is TRUE, `cat()` each piece of output as it is produced
+#'       cat(substr(llm_response, i, i))
+#'       Sys.sleep(0.1)
+#'     }
+#'   }
+#'   # Return the entire response from the LLM at the end
+#'   llm_response
+#' }
+#'
+#' chattr_defaults("console", provider = "my llm")
+#' chattr("hello")
+#' chattr("hello", stream = FALSE)
+#' chattr("hello", prompt_build = FALSE)
+#' chattr("hello", preview = TRUE)
+#' }
 #' @export
 ch_submit <- function(defaults,
                       prompt = NULL,
diff --git a/man/ch_submit.Rd b/man/ch_submit.Rd
index 3d82305..244b92a 100644
--- a/man/ch_submit.Rd
+++ b/man/ch_submit.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/ch-submit.R
 \name{ch_submit}
 \alias{ch_submit}
-\title{Method to easily integrate to new LLM's}
+\title{Method to easily integrate with new LLM APIs}
 \usage{
 ch_submit(
   defaults,
@@ -31,6 +31,55 @@ prompt (TRUE)}
 \item{...}{Optional arguments; currently unused.}
 }
 \description{
-Method to easily integrate to new LLM's
+Method to easily integrate with new LLM APIs
+}
+\details{
+Use this function to integrate your own LLM API with chattr. A few
+requirements need to be met for it to work properly:
+\itemize{
+\item The output of the function needs to be the parsed response from the LLM.
+\item For APIs that support streaming, use \code{cat()} to output the
+response from the LLM API as it arrives.
+\item If \code{preview} is set to \code{TRUE}, do not send anything to the
+LLM API; simply return the resulting prompt.
+}
+
+The \code{defaults} argument controls which method is dispatched. Use
+\code{chattr_defaults()} to set the provider. The \code{provider} value determines
+the R class name: spaces become underscores and \code{ch_} is prepended, so
+provider "my llm" dispatches to \code{ch_submit.ch_my_llm()}. See the example below.
+}
+\examples{
+\dontrun{
+library(chattr)
+ch_submit.ch_my_llm <- function(defaults,
+                                prompt = NULL,
+                                stream = NULL,
+                                prompt_build = TRUE,
+                                preview = FALSE,
+                                ...) {
+  # If `prompt_build` is TRUE, prepend your instructions to the prompt
+  if (prompt_build) prompt <- paste0("Use the tidyverse\n", prompt)
+  # If `preview` is TRUE, return the resulting prompt without calling the API
+  if (preview) return(prompt)
+  llm_response <- paste0("You said this: \n", prompt)
+  if (stream) {
+    cat("streaming:\n")
+    for (i in seq_len(nchar(llm_response))) {
+      # If `stream` is TRUE, `cat()` each piece of output as it is produced
+      cat(substr(llm_response, i, i))
+      Sys.sleep(0.1)
+    }
+  }
+  # Return the entire response from the LLM at the end
+  llm_response
+}
+
+chattr_defaults("console", provider = "my llm")
+chattr("hello")
+chattr("hello", stream = FALSE)
+chattr("hello", prompt_build = FALSE)
+chattr("hello", preview = TRUE)
+}
 }
 \keyword{internal}
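
A note on the dispatch rule the new `@details` section describes, for anyone
wiring up their own provider: the sketch below is a minimal illustration
under stated assumptions, not chattr's actual implementation.
`provider_to_class()` is a hypothetical helper; the grounded behavior, taken
from the example in the patch, is that spaces in the `provider` value become
underscores and `ch_` is prepended, which is what makes
`provider = "my llm"` reach `ch_submit.ch_my_llm()`.

    # Hypothetical helper: map a provider label to the S3 class that
    # ch_submit() is assumed to dispatch on (i.e., via UseMethod())
    provider_to_class <- function(provider) {
      paste0("ch_", gsub(" ", "_", provider))
    }

    provider_to_class("my llm")
    #> [1] "ch_my_llm"

    # Tagging the defaults object with that class is what would route a call
    # like chattr("hello") to the user's ch_submit.ch_my_llm() method
    defaults <- structure(list(provider = "my llm"), class = "ch_my_llm")
    inherits(defaults, "ch_my_llm")
    #> [1] TRUE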