diff --git a/local_llm/__init__.py b/local_llm/__init__.py
index f735771..4334c20 100644
--- a/local_llm/__init__.py
+++ b/local_llm/__init__.py
@@ -146,9 +146,13 @@ def get_model(model_name="Mistral-7B-OpenOrca", models_dir="models"):
         if DOWNLOAD_MODELS is False:
             raise Exception("Model not found.")
         url = (
-            model_url
-            if "https://" in model_url
-            else f"https://huggingface.co/{model_url}/resolve/main/{model_name}.{quantization_type}.gguf"
+            (
+                model_url
+                if "https://" in model_url
+                else f"https://huggingface.co/{model_url}/resolve/main/{model_name}.{quantization_type}.gguf"
+            )
+            if model_name != "mistrallite-7b"
+            else f"https://huggingface.co/TheBloke/MistralLite-7B-GGUF/resolve/main/mistrallite.{quantization_type}.gguf"
         )
         print(f"Downloading {model_name}...")
         with requests.get(url, stream=True, allow_redirects=True) as r:
diff --git a/setup.py b/setup.py
index 2775929..a1ea866 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@ setup(
     name="local-llm",
-    version="0.0.15",
+    version="0.0.16",
     description="Local-LLM is a llama.cpp server in Docker with OpenAI Style Endpoints.",
     long_description=long_description,
     long_description_content_type="text/markdown",