
Commit

updated code
komi786 committed Oct 17, 2024
1 parent a5d0796 commit 4800246
Showing 5 changed files with 36 additions and 32 deletions.
1 change: 1 addition & 0 deletions backend/pyproject.toml
@@ -60,6 +60,7 @@ dependencies = [
     "faiss-cpu",
     "langchain-together",
     "simstring-fast",
+    "langchain_ollama",
     "langchain_huggingface",
     "langchain_groq",
     "fastembed",
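
langchain_ollama is LangChain's integration package for locally served Ollama models. A minimal sketch of the kind of usage this new dependency enables, assuming the backend instantiates a local chat model somewhere downstream (the model name, settings, and prompt below are illustrative assumptions, not taken from this repository):

from langchain_ollama import ChatOllama

# Illustrative values: the model name and temperature are assumptions, not from this repo.
llm = ChatOllama(model="llama3.1", temperature=0)

reply = llm.invoke("Return the standard vocabulary term for 'heart attack'.")
print(reply.content)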
56 changes: 28 additions & 28 deletions backend/requirements.txt
@@ -224,37 +224,37 @@ numpy==1.26.4
     # rank-bm25
     # scikit-learn
     # scipy
-    # transformers
-nvidia-cublas-cu12==12.1.3.1
-    # via
-    #   nvidia-cudnn-cu12
-    #   nvidia-cusolver-cu12
-    #   torch
-nvidia-cuda-cupti-cu12==12.1.105
-    # via torch
-nvidia-cuda-nvrtc-cu12==12.1.105
-    # via torch
-nvidia-cuda-runtime-cu12==12.1.105
-    # via torch
-nvidia-cudnn-cu12==9.1.0.70
-    # via torch
-nvidia-cufft-cu12==11.0.2.54
-    # via torch
-nvidia-curand-cu12==10.3.2.106
-    # via torch
-nvidia-cusolver-cu12==11.4.5.107
-    # via torch
-nvidia-cusparse-cu12==12.1.0.106
-    # via
-    #   nvidia-cusolver-cu12
-    #   torch
-nvidia-nccl-cu12==2.20.5
-    # via torch
-nvidia-nvjitlink-cu12==12.6.77
-    # via
-    #   nvidia-cusolver-cu12
-    #   nvidia-cusparse-cu12
-nvidia-nvtx-cu12==12.1.105
-    # via torch
+# # transformers
+# nvidia-cublas-cu12==12.1.3.1
+# # via
+# #   nvidia-cudnn-cu12
+# #   nvidia-cusolver-cu12
+# #   torch
+# nvidia-cuda-cupti-cu12==12.1.105
+# # via torch
+# nvidia-cuda-nvrtc-cu12==12.1.105
+# # via torch
+# nvidia-cuda-runtime-cu12==12.1.105
+# # via torch
+# nvidia-cudnn-cu12==9.1.0.70
+# # via torch
+# nvidia-cufft-cu12==11.0.2.54
+# # via torch
+# nvidia-curand-cu12==10.3.2.106
+# # via torch
+# nvidia-cusolver-cu12==11.4.5.107
+# # via torch
+# nvidia-cusparse-cu12==12.1.0.106
+# # via
+# #   nvidia-cusolver-cu12
+# #   torch
+# nvidia-nccl-cu12==2.20.5
+# # via torch
+# nvidia-nvjitlink-cu12==12.6.77
+# # via
+# #   nvidia-cusolver-cu12
+# #   nvidia-cusparse-cu12
+# nvidia-nvtx-cu12==12.1.105
+# # via torch
 odfpy==1.4.1
     # via pandas
@@ -473,7 +473,7 @@ transformers==4.43.4
     # adapters
     # langchain-huggingface
     # sentence-transformers
-triton==3.0.0
+# triton==3.0.0
     # via torch
 typing-extensions==4.9.0
     # via
8 changes: 4 additions & 4 deletions backend/src/mapping_generation/llm_chain.py
@@ -493,7 +493,7 @@ def extract_information(query, model_name=LLM_ID, prompt=None):
             )
             result["rel"] = rel
             result["full_query"] = query
-            print(f"extract_information result={result}")
+            print(f"extract information result after fixing={result}")
             return QueryDecomposedModel(**result)
 
     except ValidationError as e:
@@ -507,7 +507,7 @@ def extract_information(query, model_name=LLM_ID, prompt=None):
             )
             result["rel"] = rel
             result["full_query"] = query
-            print(f"extract_information result={result}")
+            print(f"extract information result={result}")
             return QueryDecomposedModel(**result)
     # except Exception as e:
     #     logger.info(f"Error in prompt:{e}")
@@ -928,7 +928,7 @@ def pass_to_chat_llm_chain(
     link_predictions_results = []
 
     for _ in range(n_prompts):  # Assume n_prompts is 3
-        ranking_prompt = generate_ranking_prompt(query=query,domain=domain,in_context=False)
+        ranking_prompt = generate_ranking_prompt(query=query,domain=domain,in_context=True)
         ranking_results = get_llm_results(prompt=ranking_prompt, query=query, documents=documents, llm=model,llm_name=llm_name)
         if ranking_results:
             ranking_scores.extend(ranking_results)
@@ -938,7 +938,7 @@ def pass_to_chat_llm_chain(
             logger.info(f"Exact match found in Ranking: {result['answer']} = {exact_match_found_rank}. Does it exist in original documents={result['answer'] in documents}")
     link_predictions_results = []
     if prompt_stage == 2:
-        link_prediction_prompt = generate_link_prediction_prompt(query, documents,domain=domain,in_context=False)
+        link_prediction_prompt = generate_link_prediction_prompt(query, documents,domain=domain,in_context=True)
         lp_results = get_llm_results(prompt=link_prediction_prompt, query=query, documents=documents, llm=model,llm_name=llm_name)
         if lp_results:
             for res in lp_results:
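
Both call sites above flip in_context from False to True, presumably switching the ranking and link-prediction prompts into few-shot mode using the FAISS-backed example selector set up in manager_llm.py (next file). A minimal sketch of that pattern with LangChain's semantic-similarity selector — the example records and prompt wording are hypothetical, and the real wiring lives in generate_ranking_prompt and ExampleSelectorManager:

from langchain_community.vectorstores import FAISS
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate
from langchain_huggingface import HuggingFaceEmbeddings

# Hypothetical few-shot examples; the repo's real examples come from its own data files.
examples = [
    {"query": "heart attack", "answer": "myocardial infarction"},
    {"query": "high blood sugar", "answer": "hyperglycemia"},
]

# Index the examples and retrieve the k most similar ones for each incoming query.
selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2"),
    FAISS,
    k=2,
)

ranking_prompt = FewShotPromptTemplate(
    example_selector=selector,
    example_prompt=PromptTemplate.from_template("Query: {query}\nAnswer: {answer}"),
    prefix="Rank the candidate terms for the query below.",
    suffix="Query: {query}\nAnswer:",
    input_variables=["query"],
)
print(ranking_prompt.format(query="chest pain"))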
1 change: 1 addition & 0 deletions backend/src/mapping_generation/manager_llm.py
@@ -229,6 +229,7 @@ def get_example_selector(context_key: str, examples: List[Dict[str, str]], k=4,
     if context_key not in ExampleSelectorManager._selectors:
         try:
             if selector_path is None:
+
                 selector_path = f'../data/faiss_index_{context_key}'
                 os.makedirs(os.path.dirname(selector_path), exist_ok=True)  # Create the directory if it doesn't exist
                 # Initialize the embeddings
2 changes: 2 additions & 0 deletions backend/src/mapping_generation/utils.py
@@ -217,6 +217,8 @@ def save_json_data(file_path, data):
 
 def init_logger(log_file_path=LOG_FILE) -> logging.Logger:
     # Create a logger
+    if not os.path.exists(os.path.dirname(log_file_path)):
+        os.makedirs(os.path.dirname(log_file_path))
     logger = logging.getLogger(__name__)
     logger.setLevel(logging.DEBUG)  # Set the logging level to DEBUG
     # Create a file handler
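
The two added lines create the log directory before logging.FileHandler opens the file, which would otherwise raise FileNotFoundError on a fresh checkout. A self-contained sketch of the fixed function under that reading — the handler setup is assumed from the surrounding comments, and LOG_FILE here stands in for the repo's constant. Note that os.path.dirname returns '' for a bare filename, so guarding on a non-empty dirname and using os.makedirs(..., exist_ok=True), as manager_llm.py does, is the slightly more robust form:

import logging
import os

LOG_FILE = "logs/app.log"  # assumption: stands in for the repo's LOG_FILE constant

def init_logger(log_file_path: str = LOG_FILE) -> logging.Logger:
    # Create the log directory before the file handler tries to open the file.
    log_dir = os.path.dirname(log_file_path)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # File handler (assumed from the surrounding comments in utils.py).
    handler = logging.FileHandler(log_file_path)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s"))
    logger.addHandler(handler)
    return logger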
