Skip to content

Commit

Permalink
Accommodate mistrallite naming
Browse files Browse the repository at this point in the history
  • Loading branch information
Josh-XT committed Oct 25, 2023
1 parent b9b5b0e commit ce58a9b
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 4 deletions.
10 changes: 7 additions & 3 deletions local_llm/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,9 +146,13 @@ def get_model(model_name="Mistral-7B-OpenOrca", models_dir="models"):
if DOWNLOAD_MODELS is False:
raise Exception("Model not found.")
url = (
model_url
if "https://" in model_url
else f"https://huggingface.co/{model_url}/resolve/main/{model_name}.{quantization_type}.gguf"
(
model_url
if "https://" in model_url
else f"https://huggingface.co/{model_url}/resolve/main/{model_name}.{quantization_type}.gguf"
)
if model_name != "mistrallite-7b"
else f"https://huggingface.co/TheBloke/MistralLite-7B-GGUF/resolve/main/mistrallite.{quantization_type}.gguf"
)
print(f"Downloading {model_name}...")
with requests.get(url, stream=True, allow_redirects=True) as r:
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

setup(
name="local-llm",
version="0.0.15",
version="0.0.16",
description="Local-LLM is a llama.cpp server in Docker with OpenAI Style Endpoints.",
long_description=long_description,
long_description_content_type="text/markdown",
Expand Down

0 comments on commit ce58a9b

Please sign in to comment.