Skip to content

Commit

Permalink
Merge pull request #86 from mlverse/updates
Browse files Browse the repository at this point in the history
Updates
  • Loading branch information
edgararuiz authored Apr 8, 2024
2 parents b6a4cfc + 5054930 commit 6688b5c
Show file tree
Hide file tree
Showing 15 changed files with 119 additions and 27 deletions.
3 changes: 1 addition & 2 deletions DESCRIPTION
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
Package: chattr
Title: Integrates LLM's with the RStudio IDE
Version: 0.0.0.9010
Version: 0.0.0.9011
Authors@R: c(
person("Edgar", "Ruiz", , "[email protected]", role = c("aut", "cre")),
person(given = "Posit Software, PBC", role = c("cph", "fnd"))
Expand Down Expand Up @@ -42,4 +42,3 @@ Suggests:
withr
Config/testthat/edition: 3
VignetteBuilder: knitr
Remotes: r-lib/httr2
10 changes: 6 additions & 4 deletions R/ch-submit.R
Original file line number Diff line number Diff line change
Expand Up @@ -32,13 +32,15 @@
#' preview = FALSE,
#' ...) {
#' # Use `prompt_build` to append the prompts you wish to append
#' if(prompt_build) prompt <- paste0("Use the tidyverse\n", prompt)
#' if (prompt_build) prompt <- paste0("Use the tidyverse\n", prompt)
#' # If `preview` is true, return the resulting prompt back
#' if(preview) return(prompt)
#' if (preview) {
#' return(prompt)
#' }
#' llm_response <- paste0("You said this: \n", prompt)
#' if(stream) {
#' if (stream) {
#' cat("streaming:\n")
#' for(i in seq_len(nchar(llm_response))) {
#' for (i in seq_len(nchar(llm_response))) {
#' # If `stream` is true, make sure to `cat()` the current output
#' cat(substr(llm_response, i, i))
#' Sys.sleep(0.1)
Expand Down
10 changes: 8 additions & 2 deletions R/chattr-defaults.R
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
#' set to NULL
#' @param include_doc_contents Send the current code in the document
#' @param include_history Indicates whether to include the chat history when
#' everytime a new prompt is submitted
#' every time a new prompt is submitted
#' @param provider The name of the provider of the LLM. Today, only "openai" is
#' available
#' @param path The location of the model. It could be an URL or a file path.
Expand Down Expand Up @@ -52,6 +52,12 @@ chattr_defaults <- function(type = "default",
...) {
function_args <- c(as.list(environment()), ...)

if (type == "default") {
all_def <- function_args
    all_def$type <- "all"
chattr_defaults_set(arguments = function_args, type = "all")
}

sys_type <- Sys.getenv("CHATTR_TYPE", NA)
if (!is.na(sys_type)) {
type <- sys_type
Expand Down Expand Up @@ -109,7 +115,7 @@ chattr_defaults <- function(type = "default",
}

chattr_defaults_set(
arguments = function_args,
arguments = chattr_defaults_get("all"),
type = type
)

Expand Down
4 changes: 2 additions & 2 deletions R/chattr-test.R
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#' Confirms conectivity to LLM interface
#' Confirms connectivity to LLM interface
#' @inheritParams ch_submit
#' @returns It returns console messages with the status of the test.
#' @export
Expand Down Expand Up @@ -115,7 +115,7 @@ ch_submit.ch_test_backend <- function(
Sys.sleep(0.1)
}
}
if(is_test) {
if (is_test) {
invisible()
} else {
prompt
Expand Down
6 changes: 3 additions & 3 deletions R/chattr-use.R
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
#' Sets the LLM model to use in your session
#' @param model_label The label of the LLM model to use. Valid values are
#' 'copilot', 'gpt4', 'gpt35', and 'llamagpt'. The value 'test' is also
#' acceptable, but it is meant for package examples, and internal testin.
#' acceptable, but it is meant for package examples, and internal testing.
#' @details
#' If the error "No model setup found" was returned, that is because none of the
#' expected setup for Copilot, OpenIA or LLama was automatically detected. Here
#' expected setup for Copilot, OpenAI or LLama was automatically detected. Here
#' is how to setup a model:
#'
#' * OpenIA - The main thing `chattr` checks is the prescence of the R user's
#' * OpenAI - The main thing `chattr` checks is the presence of the R user's
#' OpenAI PAT (Personal Access Token). It looks for it in the 'OPENAI_API_KEY'
#' environment variable. Get a PAT from the OpenAI website, and save it to that
#' environment variable. Then restart R, and try again.
Expand Down
10 changes: 6 additions & 4 deletions man/ch_submit.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion man/chattr_defaults.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions man/chattr_test.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 3 additions & 3 deletions man/chattr_use.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions tests/testthat/_snaps/backend-llamagpt.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,3 +39,11 @@
[1] "--threads" "4" "--temp" "0.01" "--n_predict"
[6] "1000" "--model"

# Output works as expected

Code
ch_llamagpt_output("tests\n> ", stream = TRUE)
Output
tests
[1] "tests\n"

54 changes: 54 additions & 0 deletions tests/testthat/_snaps/ch_defaults.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,57 @@
x Chat History
x Document contents

# Makes sure that changing something on 'default' changes it every where

Code
chattr_use("llamagpt")
Message
-- chattr
* Provider: LlamaGPT
* Path/URL: ~/LlamaGPTJ-chat/build/bin/chat
* Model: ~/ggml-gpt4all-j-v1.3-groovy.bin
* Label: GPT4ALL 1.3 (LlamaGPT)

---

Code
chattr_defaults(model = "test")
Message
-- chattr ----------------------------------------------------------------------
-- Defaults for: Default --
-- Prompt:
* Use the R language, the tidyverse, and tidymodels
-- Model
* Provider: LlamaGPT
* Path/URL: ~/LlamaGPTJ-chat/build/bin/chat
* Model: test
* Label: GPT4ALL 1.3 (LlamaGPT)
-- Model Arguments:
* threads: 4
* temp: 0.01
* n_predict: 1000
-- Context:
Max Data Files: 0
Max Data Frames: 0
x Chat History
x Document contents

# Changing something in non-default does not impact others

Code
chattr_use("llamagpt")
Message
-- chattr
* Provider: LlamaGPT
* Path/URL: ~/LlamaGPTJ-chat/build/bin/chat
* Model: ~/ggml-gpt4all-j-v1.3-groovy.bin
* Label: GPT4ALL 1.3 (LlamaGPT)

4 changes: 1 addition & 3 deletions tests/testthat/helper-utils.R
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,5 @@ test_simulate_model <- function(file, type = "console") {

test_model_backend <- function() {
chattr_use("gpt4")
chattr_defaults("chat", provider = "test backend")
chattr_defaults("console", provider = "test backend")
chattr_defaults("script", provider = "test backend")
chattr_defaults(provider = "test backend")
}
6 changes: 6 additions & 0 deletions tests/testthat/test-backend-llamagpt.R
Original file line number Diff line number Diff line change
Expand Up @@ -63,3 +63,9 @@ test_that("Args output is correct", {
out <- out[!model_line]
expect_snapshot(out)
})

test_that("Output works as expected", {
expect_snapshot(
ch_llamagpt_output("tests\n> ", stream = TRUE)
)
})
17 changes: 17 additions & 0 deletions tests/testthat/test-ch_defaults.R
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,20 @@ test_that("Basic default tests", {
expect_snapshot(chattr_defaults())
test_chattr_type_unset()
})

test_that("Makes sure that changing something on 'default' changes it every where", {
expect_snapshot(chattr_use("llamagpt"))
expect_snapshot(chattr_defaults(model = "test"))
test_chattr_type_set("chat")
x <- chattr_defaults()
expect_equal(x$model, "test")
expect_equal(x$type, "chat")
test_chattr_type_unset()
})

test_that("Changing something in non-default does not impact others", {
expect_snapshot(chattr_use("llamagpt"))
chattr_defaults("chat", model = "test")
x <- chattr_defaults("console")
expect_true(x$model != "test")
})
2 changes: 1 addition & 1 deletion vignettes/openai-gpt.Rmd
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ To switch back to GPT 4, run:
chattr_use("gpt4")
```

To see the latest list which endpoint to use, go to : [Model Endpoint Compatability](https://platform.openai.com/docs/models/model-endpoint-compatibility)
To see the latest list which endpoint to use, go to : [Model Endpoint Compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)

## Data files and data frames

Expand Down

0 comments on commit 6688b5c

Please sign in to comment.