Skip to content

Commit

Permalink
Merge pull request #107 from TogetherCrew/feat/106-remove-query-noise
Browse files Browse the repository at this point in the history
feat: using newer hivemind-backend lib version!
  • Loading branch information
amindadgar authored Dec 12, 2024
2 parents 1078dfb + c98fdb6 commit 6e0f1f5
Show file tree
Hide file tree
Showing 3 changed files with 21 additions and 17 deletions.
1 change: 1 addition & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ FROM python:3.11-bullseye AS base
WORKDIR /project
COPY . .
RUN pip3 install -r requirements.txt
RUN python -m spacy download en_core_web_sm

FROM base AS test
RUN chmod +x docker-entrypoint.sh
Expand Down
22 changes: 6 additions & 16 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,27 +1,17 @@
numpy
pymongo
python-dotenv
pgvector
asyncpg
psycopg2-binary
sqlalchemy[asyncio]
numpy==1.26.4
pandas==2.2.3
async-sqlalchemy
python-pptx
tc-neo4j-lib
tc-neo4j-lib==2.0.4
google-api-python-client
unstructured
cohere
neo4j>=5.14.1, <6.0.0
coverage>=7.3.3, <8.0.0
pytest>=7.4.3, <8.0.0
python-dotenv==1.0.0
tc-hivemind-backend==1.2.2
tc-hivemind-backend==1.4.2.post1
llama-index-question-gen-guidance==0.1.2
llama-index-vector-stores-postgres==0.1.2
llama-index-vector-stores-postgres
celery[redis]>=5.3.6, <6.0.0
guidance==0.1.14
tc-messageBroker==1.7.1
traceloop-sdk==0.14.1
traceloop-sdk==0.31.0
backoff==2.2.1
fastapi[standard]==0.114.1
faststream==0.5.28
Expand Down
15 changes: 14 additions & 1 deletion utils/query_engine/subquestion_engine.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,20 @@
from typing import List, Optional, Sequence, cast

import llama_index.core.instrumentation as instrument
from llama_index.core.async_utils import run_async_tasks
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.instrumentation.events.query import QueryEndEvent, QueryStartEvent
from llama_index.core.query_engine import SubQuestionAnswerPair, SubQuestionQueryEngine
from llama_index.core.question_gen.types import BaseQuestionGenerator
from llama_index.core.response_synthesizers import BaseSynthesizer
from llama_index.core.schema import NodeWithScore, QueryBundle
from llama_index.core.tools.query_engine import QueryEngineTool
from llama_index.core.utils import get_color_mapping, print_text

dispatcher = instrument.get_dispatcher(__name__)


class CustomSubQuestionQueryEngine(SubQuestionQueryEngine):
def __init__(
Expand Down Expand Up @@ -74,7 +78,16 @@ def _query(

return response, qa_pairs_all

@dispatcher.span
def query(
    self, str_or_query_bundle: str | QueryBundle
) -> tuple[RESPONSE_TYPE, list[NodeWithScore]]:
    """Run a query and return both the response and the sub-question QA pairs.

    Overrides ``SubQuestionQueryEngine.query`` because the parent only returns
    the response; this subclass's ``_query`` also yields the per-sub-question
    answer pairs, which callers need alongside the synthesized result.

    Parameters
    ----------
    str_or_query_bundle : str | QueryBundle
        The user query. A plain string is wrapped into a ``QueryBundle``
        before being handed to ``self._query``.

    Returns
    -------
    tuple[RESPONSE_TYPE, list[NodeWithScore]]
        The synthesized response and the list produced by ``self._query``
        (named ``qa_pairs_all`` — presumably the sub-question answer pairs;
        confirm against ``_query``'s implementation, which is truncated here).
    """
    # Emit instrumentation events around the whole query lifecycle so
    # observability tooling (e.g. traceloop) sees start/end boundaries.
    dispatcher.event(QueryStartEvent(query=str_or_query_bundle))
    with self.callback_manager.as_trace("query"):
        if isinstance(str_or_query_bundle, str):
            str_or_query_bundle = QueryBundle(str_or_query_bundle)
        query_result, qa_pairs_all = self._query(str_or_query_bundle)
    dispatcher.event(
        QueryEndEvent(query=str_or_query_bundle, response=query_result)
    )
    return query_result, qa_pairs_all

0 comments on commit 6e0f1f5

Please sign in to comment.