Merge pull request #18 from edgararuiz/updates
Llama 3.2 integration
edgararuiz authored Sep 29, 2024
2 parents 7154570 + dd8fdf0 commit 148690d
Showing 43 changed files with 196 additions and 127 deletions.
1 change: 1 addition & 0 deletions NAMESPACE
@@ -9,6 +9,7 @@ S3method(llm_sentiment,data.frame)
 S3method(llm_summarize,"tbl_Spark SQL")
 S3method(llm_summarize,data.frame)
 S3method(llm_translate,data.frame)
+S3method(m_backend_prompt,mall_llama3.2)
 S3method(m_backend_prompt,mall_session)
 S3method(m_backend_submit,mall_ollama)
 S3method(m_backend_submit,mall_simulate_llm)
2 changes: 1 addition & 1 deletion R/llm-classify.R
@@ -24,7 +24,7 @@
 #'
 #' data("reviews")
 #'
-#' llm_use("ollama", "llama3.1", seed = 100, .silent = TRUE)
+#' llm_use("ollama", "llama3.2", seed = 100, .silent = TRUE)
 #'
 #' llm_classify(reviews, review, c("appliance", "computer"))
 #'
2 changes: 1 addition & 1 deletion R/llm-custom.R
@@ -15,7 +15,7 @@
 #'
 #' data("reviews")
 #'
-#' llm_use("ollama", "llama3.1", seed = 100, .silent = TRUE)
+#' llm_use("ollama", "llama3.2", seed = 100, .silent = TRUE)
 #'
 #' my_prompt <- paste(
 #'   "Answer a question.",
2 changes: 1 addition & 1 deletion R/llm-extract.R
@@ -17,7 +17,7 @@
 #'
 #' data("reviews")
 #'
-#' llm_use("ollama", "llama3.1", seed = 100, .silent = TRUE)
+#' llm_use("ollama", "llama3.2", seed = 100, .silent = TRUE)
 #'
 #' # Use 'labels' to let the function know what to extract
 #' llm_extract(reviews, review, labels = "product")
2 changes: 1 addition & 1 deletion R/llm-sentiment.R
@@ -16,7 +16,7 @@
 #'
 #' data("reviews")
 #'
-#' llm_use("ollama", "llama3.1", seed = 100, .silent = TRUE)
+#' llm_use("ollama", "llama3.2", seed = 100, .silent = TRUE)
 #'
 #' llm_sentiment(reviews, review)
 #'
2 changes: 1 addition & 1 deletion R/llm-summarize.R
@@ -12,7 +12,7 @@
 #'
 #' data("reviews")
 #'
-#' llm_use("ollama", "llama3.1", seed = 100, .silent = TRUE)
+#' llm_use("ollama", "llama3.2", seed = 100, .silent = TRUE)
 #'
 #' # Use max_words to set the maximum number of words to use for the summary
 #' llm_summarize(reviews, review, max_words = 5)
2 changes: 1 addition & 1 deletion R/llm-translate.R
@@ -12,7 +12,7 @@
 #'
 #' data("reviews")
 #'
-#' llm_use("ollama", "llama3.1", seed = 100, .silent = TRUE)
+#' llm_use("ollama", "llama3.2", seed = 100, .silent = TRUE)
 #'
 #' # Pass the desired language to translate to
 #' llm_translate(reviews, review, "spanish")
4 changes: 2 additions & 2 deletions R/llm-use.R
@@ -19,11 +19,11 @@
 #' \dontrun{
 #' library(mall)
 #'
-#' llm_use("ollama", "llama3.1")
+#' llm_use("ollama", "llama3.2")
 #'
 #' # Additional arguments will be passed 'as-is' to the
 #' # downstream R function in this example, to ollama::chat()
-#' llm_use("ollama", "llama3.1", seed = 100, temp = 0.1)
+#' llm_use("ollama", "llama3.2", seed = 100, temp = 0.1)
 #'
 #' # During the R session, you can change any argument
 #' # individually and it will retain all of previous
49 changes: 49 additions & 0 deletions R/m-backend-prompt.R
@@ -4,6 +4,55 @@ m_backend_prompt <- function(backend, additional) {
   UseMethod("m_backend_prompt")
 }
 
+#' @export
+m_backend_prompt.mall_llama3.2 <- function(backend, additional = "") {
+  base_method <- NextMethod()
+  base_method$extract <- function(labels) {
+    no_labels <- length(labels)
+    col_labels <- paste0(labels, collapse = ", ")
+    plural <- ifelse(no_labels > 1, "s", "")
+    text_multi <- ifelse(
+      no_labels > 1,
+      "Return the response exclusively in a pipe separated list, and no headers. ",
+      ""
+    )
+    list(
+      list(
+        role = "user",
+        content = glue(paste(
+          "You are a helpful text extraction engine.",
+          "Extract the {col_labels} being referred to on the text.",
+          "I expect {no_labels} item{plural} exactly.",
+          "No capitalization. No explanations.",
+          "{text_multi}",
+          "{additional}",
+          "The answer is based on the following text:\n{{x}}"
+        ))
+      )
+    )
+  }
+  base_method$classify <- function(labels) {
+    labels <- process_labels(
+      x = labels,
+      if_character = "Determine if the text refers to one of the following: {x}",
+      if_formula = "If it classifies as {f_lhs(x)} then return {f_rhs(x)}"
+    )
+    list(
+      list(
+        role = "user",
+        content = glue(paste(
+          "You are a helpful classification engine.",
+          "{labels}.",
+          "No capitalization. No explanations.",
+          "{additional}",
+          "The answer is based on the following text:\n{{x}}"
+        ))
+      )
+    )
+  }
+  base_method
+}
+
 #' @export
 m_backend_prompt.mall_session <- function(backend, additional = "") {
   list(
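Note: the new `mall_llama3.2` method overrides only the `extract` and `classify` prompts; `NextMethod()` pulls everything else from the `mall_session` base. A minimal sketch of how S3 dispatch would reach it — the session object below is hypothetical (the real one is built by `m_defaults_set()`, changed next), and it assumes the class vector orders `mall_llama3.2` ahead of `mall_session` and that the generic is accessible:

```r
library(glue)
library(mall)  # assumes the m_backend_prompt() generic is accessible

# Hypothetical stand-in for the session object m_defaults_set() builds.
# Dispatch hits m_backend_prompt.mall_llama3.2() first, which calls
# NextMethod() to get the base prompts, then swaps in its own
# $extract and $classify builders.
backend <- structure(
  list(model = "llama3.2", backend = "ollama"),
  class = c("mall_llama3.2", "mall_ollama", "mall_session")
)

prompts <- m_backend_prompt(backend, additional = "")
prompts$classify(c("appliance", "computer"))  # llama3.2-specific prompt
```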
10 changes: 9 additions & 1 deletion R/m-defaults.R
@@ -5,8 +5,16 @@ m_defaults_set <- function(...) {
     nm <- names(new_args[i])
     defaults[[nm]] <- new_args[[i]]
   }
+  model <- defaults[["model"]]
+  split_model <- strsplit(model, "\\:")[[1]]
+  if (length(split_model) > 1) {
+    sub_model <- split_model[[1]]
+  } else {
+    sub_model <- NULL
+  }
   obj_class <- clean_names(c(
-    defaults[["model"]],
+    model,
+    sub_model,
     defaults[["backend"]],
     "session"
   ))
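The split on `:` is what lets a tag-qualified model name still pick up the version-specific S3 class. A rough sketch of the intent — the `"llama3.2:latest"` value and the `mall_` prefixing by `clean_names()` are assumptions for illustration, not part of this diff:

```r
# Sketch: deriving sub_model from a tag-qualified model name.
model <- "llama3.2:latest"
split_model <- strsplit(model, "\\:")[[1]]   # "llama3.2" "latest"
sub_model <- if (length(split_model) > 1) split_model[[1]] else NULL

# Both names feed the class vector, so the session still gains the
# "mall_llama3.2" class (assuming clean_names() prefixes "mall_"),
# and the new m_backend_prompt.mall_llama3.2() method applies.
```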
37 changes: 18 additions & 19 deletions README.Rmd
@@ -16,7 +16,7 @@ library(dbplyr)
 library(tictoc)
 library(DBI)
 source("utils/knitr-print.R")
-mall::llm_use("ollama", "llama3.1", seed = 100, .cache = "_readme_cache")
+mall::llm_use("ollama", "llama3.2", seed = 100, .cache = "_readme_cache")
 ```
 
 # mall
@@ -78,9 +78,9 @@ This saves the data scientist the need to write and tune an NLP model.
 [Installation guide](https://hauselin.github.io/ollama-r/#installation)
 
 - Download an LLM model. For example, I have been developing this package using
-Llama 3.1 to test. To get that model you can run:
+Llama 3.2 to test. To get that model you can run:
 ```r
-ollamar::pull("llama3.1")
+ollamar::pull("llama3.2")
 ```
 
 ### With Databricks
@@ -218,10 +218,10 @@ use.
 Calling `llm_use()` directly will let you specify the model and backend to use.
 You can also setup additional arguments that will be passed down to the
 function that actually runs the prediction. In the case of Ollama, that function
-is [`generate()`](https://hauselin.github.io/ollama-r/reference/generate.html).
+is [`chat()`](https://hauselin.github.io/ollama-r/reference/chat.html).
 
 ```{r, eval = FALSE}
-llm_use("ollama", "llama3.1", seed = 100, temperature = 0.2)
+llm_use("ollama", "llama3.2", seed = 100, temperature = 0)
 ```
 
 ## Key considerations
@@ -232,7 +232,8 @@ If using this method with an LLM locally available, the cost will be a long
 running time. Unless using a very specialized LLM, a given LLM is a general model.
 It was fitted using a vast amount of data. So determining a response for each
 row, takes longer than if using a manually created NLP model. The default model
-used in Ollama is Llama 3.1, which was fitted using 8B parameters.
+used in Ollama is [Llama 3.2](https://ollama.com/library/llama3.2),
+which was fitted using 3B parameters.
 
 If using an external LLM service, the consideration will need to be for the
 billing costs of using such service. Keep in mind that you will be sending a lot
@@ -260,34 +260,32 @@ library(classmap)
 data(data_bookReviews)
-book_reviews <- data_bookReviews |>
-  head(100) |>
-  as_tibble()
-glimpse(book_reviews)
+data_bookReviews |>
+  glimpse()
 ```
 As per the docs, `sentiment` is a factor indicating the sentiment of the review:
 negative (1) or positive (2)
 
 ```{r}
-length(strsplit(paste(book_reviews, collapse = " "), " ")[[1]])
+length(strsplit(paste(head(data_bookReviews$review, 100), collapse = " "), " ")[[1]])
 ```
 
 Just to get an idea of how much data we're processing, I'm using a very, very
 simple word count. So we're analyzing a bit over 20 thousand words.
 
 ```{r}
-reviews_llm <- book_reviews |>
+reviews_llm <- data_bookReviews |>
+  head(100) |>
   llm_sentiment(
     col = review,
-    options = c("positive", "negative"),
+    options = c("positive" ~ 2, "negative" ~ 1),
     pred_name = "predicted"
   )
 ```
 
-As far as **time**, on my Apple M3 machine, it took about 3 minutes to process,
-100 rows, containing 20 thousand words. Setting `temp` to 0.2 in `llm_use()`,
-made the model run a bit faster.
+As far as **time**, on my Apple M3 machine, it took about 1.5 minutes to process
+100 rows containing 20 thousand words. Setting `temp` to 0 in `llm_use()`
+made the model run faster.
 
 The package uses `purrr` to send each prompt individually to the LLM. But, I did
 try a few different ways to speed up the process, unsuccessfully:
@@ -314,8 +313,8 @@ will not be of the "truth", but rather the package's results recorded in
 library(forcats)
 reviews_llm |>
-  mutate(fct_pred = as.factor(ifelse(predicted == "positive", 2, 1))) |>
-  yardstick::accuracy(sentiment, fct_pred)
+  mutate(predicted = as.factor(predicted)) |>
+  yardstick::accuracy(sentiment, predicted)
 ```
 
 ## Vector functions