From 34ceb7ce111cbb4619b7501839b90775f5cd53a0 Mon Sep 17 00:00:00 2001
From: Mohammad Amin
Date: Mon, 8 Jan 2024 09:07:27 +0330
Subject: [PATCH] update: llama-index lib usage

We updated the library to the newest compatible version and switched to the
right LLM for guidance.

Note: the guidance_llm is the model that creates the subqueries.
---
 requirements.txt | 2 +-
 subquery.py      | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index a780b05..0a35833 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 numpy
-llama-index>=0.9.21, <1.0.0
+llama-index>=0.9.26, <1.0.0
 pymongo
 python-dotenv
 pgvector

diff --git a/subquery.py b/subquery.py
index 73cbb55..58dfd21 100644
--- a/subquery.py
+++ b/subquery.py
@@ -1,4 +1,4 @@
-from guidance.models import OpenAI as GuidanceOpenAI
+from guidance.models import OpenAIChat
 from llama_index import QueryBundle, ServiceContext
 from llama_index.core import BaseQueryEngine
 from llama_index.query_engine import SubQuestionQueryEngine
@@ -94,7 +94,8 @@ def query_multiple_source(
         raise NotImplementedError

     question_gen = GuidanceQuestionGenerator.from_defaults(
-        guidance_llm=GuidanceOpenAI("text-davinci-003"), verbose=False
+        guidance_llm=OpenAIChat("gpt-3.5-turbo"),
+        verbose=False,
     )
     embed_model = CohereEmbedding()
     service_context = ServiceContext.from_defaults(embed_model=embed_model)
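
For reference, below is a minimal sketch (not part of the patch) of how the updated
question generator plugs into a SubQuestionQueryEngine on llama-index 0.9.x. The
GuidanceQuestionGenerator call and the guidance import mirror the patched
subquery.py; everything else is assumed for illustration: the
llama_index.question_gen.guidance_generator import path, the in-memory Document,
the "docs" tool, and the query string are hypothetical, and the default
ServiceContext stands in for the CohereEmbedding setup used in the real code.

from guidance.models import OpenAIChat
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.query_engine import SubQuestionQueryEngine
from llama_index.question_gen.guidance_generator import GuidanceQuestionGenerator
from llama_index.tools import QueryEngineTool, ToolMetadata

# The guidance_llm writes the sub-queries; the chat model gpt-3.5-turbo
# replaces the retired text-davinci-003 completion model.
question_gen = GuidanceQuestionGenerator.from_defaults(
    guidance_llm=OpenAIChat("gpt-3.5-turbo"),
    verbose=False,
)

# Hypothetical single-source index standing in for the real data sources;
# the production code wires a CohereEmbedding into the service context instead.
service_context = ServiceContext.from_defaults()
docs = [Document(text="Example content describing the project architecture.")]
index = VectorStoreIndex.from_documents(docs, service_context=service_context)

tool = QueryEngineTool(
    query_engine=index.as_query_engine(),
    metadata=ToolMetadata(name="docs", description="Project documentation"),
)

# The sub-question engine asks question_gen to split the query into per-source
# sub-queries, answers each one, and synthesizes a final response.
query_engine = SubQuestionQueryEngine.from_defaults(
    question_gen=question_gen,
    query_engine_tools=[tool],
    service_context=service_context,
)
response = query_engine.query("How is the project structured and deployed?")
print(response)

Switching to a chat model here matters because text-davinci-003 was deprecated
by OpenAI in early 2024, so the old completion-based guidance wrapper would no
longer work for generating subqueries.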