From 611f2b2ede3f722c4f13680afdbe090b70016ecf Mon Sep 17 00:00:00 2001
From: karthik2804
Date: Fri, 13 Sep 2024 11:08:30 +0200
Subject: [PATCH] lint fixes

Signed-off-by: karthik2804
---
 crates/llm-local/src/llama.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/llm-local/src/llama.rs b/crates/llm-local/src/llama.rs
index 84f218c8e6..66976ab3c4 100644
--- a/crates/llm-local/src/llama.rs
+++ b/crates/llm-local/src/llama.rs
@@ -15,7 +15,7 @@ use tokenizers::Tokenizer;
 const TOKENIZER_FILENAME: &str = "tokenizer.json";
 const CONFIG_FILENAME: &str = "config.json";
 const EOS_TOKEN: &str = "</s>";
-const MODEL_SAFETENSORS_INDEX: &str = "model.safetensors.index.json";
+const MODEL_SAFETENSORS_INDEX_FILE: &str = "model.safetensors.index.json";
 
 pub fn auto_device() -> Result<Device> {
     if utils::cuda_is_available() {
@@ -51,7 +51,7 @@ impl LlamaModels {
         let config = config.into_config(false);
         let cache = llama::Cache::new(true, dtype, &config, &device)?;
 
-        let safetensor_files = load_safetensors(&model_dir, MODEL_SAFETENSORS_INDEX)?;
+        let safetensor_files = load_safetensors(model_dir, MODEL_SAFETENSORS_INDEX_FILE)?;
 
         // TODO: Check if there is a safe way to load the model from the file
         let vb = unsafe { VarBuilder::from_mmaped_safetensors(&safetensor_files, dtype, &device)? };
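
Context for the second hunk: dropping the borrow in load_safetensors(&model_dir, ...) is
consistent with clippy's needless_borrow lint, which fires when model_dir is already a
reference. The helper's body is not shown in this patch; below is a minimal sketch of what
a load_safetensors with this call shape might do, resolving shard paths from a
model.safetensors.index.json file. The signature, the anyhow/serde_json error handling,
and the "weight_map" key are assumptions, not the patched crate's actual implementation.

    use std::collections::HashSet;
    use std::path::{Path, PathBuf};

    use anyhow::{Context, Result};

    // Hypothetical sketch: read the safetensors index and return the
    // distinct shard files it references, joined onto the model directory.
    fn load_safetensors(model_dir: &Path, json_file: &str) -> Result<Vec<PathBuf>> {
        let index_path = model_dir.join(json_file);
        let raw = std::fs::read_to_string(&index_path)
            .with_context(|| format!("failed to read {}", index_path.display()))?;
        let index: serde_json::Value = serde_json::from_str(&raw)?;

        // "weight_map" maps each tensor name to the shard file containing it;
        // collect the distinct shard filenames.
        let weight_map = index
            .get("weight_map")
            .and_then(|v| v.as_object())
            .context("no weight_map in safetensors index")?;
        let files: HashSet<&str> = weight_map.values().filter_map(|v| v.as_str()).collect();

        // Callers pass `model_dir` directly (no extra `&`), matching the
        // lint fix in the patch above.
        Ok(files.into_iter().map(|f| model_dir.join(f)).collect())
    }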