From 250f531b4068d475c7043c172055e312f7a26a41 Mon Sep 17 00:00:00 2001 From: Edgar Ruiz Date: Mon, 8 Apr 2024 14:41:14 -0500 Subject: [PATCH 1/5] Ver bump, remove httr2 remotes, fixes misspell --- DESCRIPTION | 3 +-- R/chattr-use.R | 2 +- man/chattr_use.Rd | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 4ec8f64..e5e210e 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: chattr Title: Integrates LLM's with the RStudio IDE -Version: 0.0.0.9010 +Version: 0.0.0.9011 Authors@R: c( person("Edgar", "Ruiz", , "edgar@posit.co", role = c("aut", "cre")), person(given = "Posit Software, PBC", role = c("cph", "fnd")) @@ -42,4 +42,3 @@ Suggests: withr Config/testthat/edition: 3 VignetteBuilder: knitr -Remotes: r-lib/httr2 diff --git a/R/chattr-use.R b/R/chattr-use.R index d88096f..29731e0 100644 --- a/R/chattr-use.R +++ b/R/chattr-use.R @@ -1,7 +1,7 @@ #' Sets the LLM model to use in your session #' @param model_label The label of the LLM model to use. Valid values are #' 'copilot', 'gpt4', 'gpt35', and 'llamagpt'. The value 'test' is also -#' acceptable, but it is meant for package examples, and internal testin. +#' acceptable, but it is meant for package examples, and internal testing. #' @details #' If the error "No model setup found" was returned, that is because none of the #' expected setup for Copilot, OpenIA or LLama was automatically detected. Here diff --git a/man/chattr_use.Rd b/man/chattr_use.Rd index 10cb0da..f4abfad 100644 --- a/man/chattr_use.Rd +++ b/man/chattr_use.Rd @@ -9,7 +9,7 @@ chattr_use(model_label = NULL) \arguments{ \item{model_label}{The label of the LLM model to use. Valid values are 'copilot', 'gpt4', 'gpt35', and 'llamagpt'. 
The value 'test' is also -acceptable, but it is meant for package examples, and internal testin.} +acceptable, but it is meant for package examples, and internal testing.} } \value{ It returns console messages to allow the user select the model to From 573a0f6186fe82002e740b9e685057844ac1b8d1 Mon Sep 17 00:00:00 2001 From: Edgar Ruiz Date: Mon, 8 Apr 2024 16:24:20 -0500 Subject: [PATCH 2/5] Fixes #85 --- R/chattr-defaults.R | 8 ++++- tests/testthat/_snaps/ch_defaults.md | 54 ++++++++++++++++++++++++++++ tests/testthat/helper-utils.R | 4 +-- tests/testthat/test-ch_defaults.R | 17 +++++++++ 4 files changed, 79 insertions(+), 4 deletions(-) diff --git a/R/chattr-defaults.R b/R/chattr-defaults.R index d4a92e8..5369d1f 100644 --- a/R/chattr-defaults.R +++ b/R/chattr-defaults.R @@ -52,6 +52,12 @@ chattr_defaults <- function(type = "default", ...) { function_args <- c(as.list(environment()), ...) + if(type == "default") { + all_def <- function_args + all_def$type == "all" + chattr_defaults_set(arguments = function_args, type = "all") + } + sys_type <- Sys.getenv("CHATTR_TYPE", NA) if (!is.na(sys_type)) { type <- sys_type @@ -109,7 +115,7 @@ chattr_defaults <- function(type = "default", } chattr_defaults_set( - arguments = function_args, + arguments = chattr_defaults_get("all"), type = type ) diff --git a/tests/testthat/_snaps/ch_defaults.md b/tests/testthat/_snaps/ch_defaults.md index dc83fcb..7148ab5 100644 --- a/tests/testthat/_snaps/ch_defaults.md +++ b/tests/testthat/_snaps/ch_defaults.md @@ -40,3 +40,57 @@ x Chat History x Document contents +# Makes sure that changing something on 'default' changes it every where + + Code + chattr_use("llamagpt") + Message + + -- chattr + * Provider: LlamaGPT + * Path/URL: ~/LlamaGPTJ-chat/build/bin/chat + * Model: ~/ggml-gpt4all-j-v1.3-groovy.bin + * Label: GPT4ALL 1.3 (LlamaGPT) + +--- + + Code + chattr_defaults(model = "test") + Message + + -- chattr ---------------------------------------------------------------------- + + -- 
Defaults for: Default -- + + -- Prompt: + * Use the R language, the tidyverse, and tidymodels + + -- Model + * Provider: LlamaGPT + * Path/URL: ~/LlamaGPTJ-chat/build/bin/chat + * Model: test + * Label: GPT4ALL 1.3 (LlamaGPT) + + -- Model Arguments: + * threads: 4 + * temp: 0.01 + * n_predict: 1000 + + -- Context: + Max Data Files: 0 + Max Data Frames: 0 + x Chat History + x Document contents + +# Changing something in non-default does not impact others + + Code + chattr_use("llamagpt") + Message + + -- chattr + * Provider: LlamaGPT + * Path/URL: ~/LlamaGPTJ-chat/build/bin/chat + * Model: ~/ggml-gpt4all-j-v1.3-groovy.bin + * Label: GPT4ALL 1.3 (LlamaGPT) + diff --git a/tests/testthat/helper-utils.R b/tests/testthat/helper-utils.R index 1fd6e6d..5586e87 100644 --- a/tests/testthat/helper-utils.R +++ b/tests/testthat/helper-utils.R @@ -6,7 +6,5 @@ test_simulate_model <- function(file, type = "console") { test_model_backend <- function() { chattr_use("gpt4") - chattr_defaults("chat", provider = "test backend") - chattr_defaults("console", provider = "test backend") - chattr_defaults("script", provider = "test backend") + chattr_defaults(provider = "test backend") } diff --git a/tests/testthat/test-ch_defaults.R b/tests/testthat/test-ch_defaults.R index 2c6897b..6783f7a 100644 --- a/tests/testthat/test-ch_defaults.R +++ b/tests/testthat/test-ch_defaults.R @@ -4,3 +4,20 @@ test_that("Basic default tests", { expect_snapshot(chattr_defaults()) test_chattr_type_unset() }) + +test_that("Makes sure that changing something on 'default' changes it every where", { + expect_snapshot(chattr_use("llamagpt")) + expect_snapshot(chattr_defaults(model = "test")) + test_chattr_type_set("chat") + x <- chattr_defaults() + expect_equal(x$model, "test") + expect_equal(x$type, "chat") + test_chattr_type_unset() +}) + +test_that("Changing something in non-default does not impact others", { + expect_snapshot(chattr_use("llamagpt")) + chattr_defaults("chat", model = "test") + x <- 
chattr_defaults("console") + expect_true(x$model != "test") +}) From 15662d1a5c50659ba713592e7cb0b87326de2c1f Mon Sep 17 00:00:00 2001 From: Edgar Ruiz Date: Mon, 8 Apr 2024 16:32:05 -0500 Subject: [PATCH 3/5] Spelling corrections, styler updates --- R/ch-submit.R | 10 ++++++---- R/chattr-defaults.R | 4 ++-- R/chattr-test.R | 4 ++-- R/chattr-use.R | 4 ++-- man/chattr_defaults.Rd | 2 +- man/chattr_test.Rd | 4 ++-- man/chattr_use.Rd | 4 ++-- vignettes/openai-gpt.Rmd | 2 +- 8 files changed, 18 insertions(+), 16 deletions(-) diff --git a/R/ch-submit.R b/R/ch-submit.R index 00a6116..fe30599 100644 --- a/R/ch-submit.R +++ b/R/ch-submit.R @@ -32,13 +32,15 @@ #' preview = FALSE, #' ...) { #' # Use `prompt_build` to append the prompts you with to append -#' if(prompt_build) prompt <- paste0("Use the tidyverse\n", prompt) +#' if (prompt_build) prompt <- paste0("Use the tidyverse\n", prompt) #' # If `preview` is true, return the resulting prompt back -#' if(preview) return(prompt) +#' if (preview) { +#' return(prompt) +#' } #' llm_response <- paste0("You said this: \n", prompt) -#' if(stream) { +#' if (stream) { #' cat("streaming:\n") -#' for(i in seq_len(nchar(llm_response))) { +#' for (i in seq_len(nchar(llm_response))) { #' # If `stream` is true, make sure to `cat()` the current output #' cat(substr(llm_response, i, i)) #' Sys.sleep(0.1) diff --git a/R/chattr-defaults.R b/R/chattr-defaults.R index 5369d1f..e300fc8 100644 --- a/R/chattr-defaults.R +++ b/R/chattr-defaults.R @@ -13,7 +13,7 @@ #' set to NULL #' @param include_doc_contents Send the current code in the document #' @param include_history Indicates whether to include the chat history when -#' everytime a new prompt is submitted +#' every time a new prompt is submitted #' @param provider The name of the provider of the LLM. Today, only "openai" is #' is available #' @param path The location of the model. It could be an URL or a file path. @@ -52,7 +52,7 @@ chattr_defaults <- function(type = "default", ...) 
{ function_args <- c(as.list(environment()), ...) - if(type == "default") { + if (type == "default") { all_def <- function_args all_def$type == "all" chattr_defaults_set(arguments = function_args, type = "all") diff --git a/R/chattr-test.R b/R/chattr-test.R index 9fd9826..5b639b2 100644 --- a/R/chattr-test.R +++ b/R/chattr-test.R @@ -1,4 +1,4 @@ -#' Confirms conectivity to LLM interface +#' Confirms connectivity to LLM interface #' @inheritParams ch_submit #' @returns It returns console massages with the status of the test. #' @export @@ -115,7 +115,7 @@ ch_submit.ch_test_backend <- function( Sys.sleep(0.1) } } - if(is_test) { + if (is_test) { invisible() } else { prompt diff --git a/R/chattr-use.R b/R/chattr-use.R index 29731e0..5d9a985 100644 --- a/R/chattr-use.R +++ b/R/chattr-use.R @@ -4,10 +4,10 @@ #' acceptable, but it is meant for package examples, and internal testing. #' @details #' If the error "No model setup found" was returned, that is because none of the -#' expected setup for Copilot, OpenIA or LLama was automatically detected. Here +#' expected setup for Copilot, OpenAI or LLama was automatically detected. Here #' is how to setup a model: #' -#' * OpenIA - The main thing `chattr` checks is the prescence of the R user's +#' * OpenAI - The main thing `chattr` checks is the presence of the R user's #' OpenAI PAT (Personal Access Token). It looks for it in the 'OPENAI_API_KEY' #' environment variable. Get a PAT from the OpenAI website, and save it to that #' environment variable. Then restart R, and try again. diff --git a/man/chattr_defaults.Rd b/man/chattr_defaults.Rd index ab8bde9..a4726f6 100644 --- a/man/chattr_defaults.Rd +++ b/man/chattr_defaults.Rd @@ -38,7 +38,7 @@ set to NULL} \item{include_doc_contents}{Send the current code in the document} \item{include_history}{Indicates whether to include the chat history when -everytime a new prompt is submitted} +every time a new prompt is submitted} \item{provider}{The name of the provider of the LLM. 
Today, only "openai" is is available} diff --git a/man/chattr_test.Rd b/man/chattr_test.Rd index 8bd7edc..48a6f30 100644 --- a/man/chattr_test.Rd +++ b/man/chattr_test.Rd @@ -3,7 +3,7 @@ \name{chattr_test} \alias{chattr_test} \alias{ch_test} -\title{Confirms conectivity to LLM interface} +\title{Confirms connectivity to LLM interface} \usage{ chattr_test(defaults = NULL) @@ -16,5 +16,5 @@ ch_test(defaults = NULL) It returns console massages with the status of the test. } \description{ -Confirms conectivity to LLM interface +Confirms connectivity to LLM interface } diff --git a/man/chattr_use.Rd b/man/chattr_use.Rd index f4abfad..997534b 100644 --- a/man/chattr_use.Rd +++ b/man/chattr_use.Rd @@ -20,10 +20,10 @@ Sets the LLM model to use in your session } \details{ If the error "No model setup found" was returned, that is because none of the -expected setup for Copilot, OpenIA or LLama was automatically detected. Here +expected setup for Copilot, OpenAI or LLama was automatically detected. Here is how to setup a model: \itemize{ -\item OpenIA - The main thing \code{chattr} checks is the prescence of the R user's +\item OpenAI - The main thing \code{chattr} checks is the presence of the R user's OpenAI PAT (Personal Access Token). It looks for it in the 'OPENAI_API_KEY' environment variable. Get a PAT from the OpenAI website, and save it to that environment variable. Then restart R, and try again. 
diff --git a/vignettes/openai-gpt.Rmd b/vignettes/openai-gpt.Rmd index 323e4fa..ba533cd 100644 --- a/vignettes/openai-gpt.Rmd +++ b/vignettes/openai-gpt.Rmd @@ -79,7 +79,7 @@ To switch back to GPT 4, run: chattr_use("gpt4") ``` -To see the latest list which endpoint to use, go to : [Model Endpoint Compatability](https://platform.openai.com/docs/models/model-endpoint-compatibility) +To see the latest list which endpoint to use, go to : [Model Endpoint Compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) ## Data files and data frames From 81461edb27b08174401dc9008b49a0f66f1bf218 Mon Sep 17 00:00:00 2001 From: Edgar Ruiz Date: Mon, 8 Apr 2024 16:52:48 -0500 Subject: [PATCH 4/5] Adds extra llamagpt test --- tests/testthat/_snaps/backend-llamagpt.md | 8 ++++++++ tests/testthat/test-backend-llamagpt.R | 6 ++++++ 2 files changed, 14 insertions(+) diff --git a/tests/testthat/_snaps/backend-llamagpt.md b/tests/testthat/_snaps/backend-llamagpt.md index d5a5ab3..dc5c715 100644 --- a/tests/testthat/_snaps/backend-llamagpt.md +++ b/tests/testthat/_snaps/backend-llamagpt.md @@ -39,3 +39,11 @@ [1] "--threads" "4" "--temp" "0.01" "--n_predict" [6] "1000" "--model" +# Output works as expected + + Code + ch_llamagpt_output("tests\n> ", stream = TRUE) + Output + tests + [1] "tests\n" + diff --git a/tests/testthat/test-backend-llamagpt.R b/tests/testthat/test-backend-llamagpt.R index 7d786af..ecfb646 100644 --- a/tests/testthat/test-backend-llamagpt.R +++ b/tests/testthat/test-backend-llamagpt.R @@ -63,3 +63,9 @@ test_that("Args output is correct", { out <- out[!model_line] expect_snapshot(out) }) + +test_that("Output works as expected", { + expect_snapshot( + ch_llamagpt_output("tests\n> ", stream = TRUE) + ) +}) From 50549305f813e233fda8de0029dfb2149a052b18 Mon Sep 17 00:00:00 2001 From: Edgar Ruiz Date: Mon, 8 Apr 2024 16:54:25 -0500 Subject: [PATCH 5/5] Updates docs --- man/ch_submit.Rd | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) 
diff --git a/man/ch_submit.Rd b/man/ch_submit.Rd index 244b92a..15fe94f 100644 --- a/man/ch_submit.Rd +++ b/man/ch_submit.Rd @@ -59,13 +59,15 @@ ch_submit.ch_my_llm <- function(defaults, preview = FALSE, ...) { # Use `prompt_build` to append the prompts you with to append - if(prompt_build) prompt <- paste0("Use the tidyverse\n", prompt) + if (prompt_build) prompt <- paste0("Use the tidyverse\n", prompt) # If `preview` is true, return the resulting prompt back - if(preview) return(prompt) + if (preview) { + return(prompt) + } llm_response <- paste0("You said this: \n", prompt) - if(stream) { + if (stream) { cat("streaming:\n") - for(i in seq_len(nchar(llm_response))) { + for (i in seq_len(nchar(llm_response))) { # If `stream` is true, make sure to `cat()` the current output cat(substr(llm_response, i, i)) Sys.sleep(0.1)