Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Tracing #175

Merged
merged 11 commits into from
Sep 27, 2024
2 changes: 1 addition & 1 deletion .devcontainer/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ RUN sudo apt-get update && sudo apt-get install -y \
# Install keyring-related and IPython packages in the same layer to reduce the image size and build time
COPY ./src/api/requirements.txt .
RUN pip install -r requirements.txt \
&& pip install keyrings.alt dbus-python ipython ipykernel mkdocs-material
&& pip install keyrings.alt dbus-python ipython ipykernel mkdocs-material

# Configure the IPython kernel
RUN ipython kernel install --name "python3" --user
Expand Down
21 changes: 21 additions & 0 deletions .github/workflows/evaluations.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,27 @@ jobs:
AZURE_CREDENTIALS: ${{ secrets.AZURE_CREDENTIALS }}
AZURE_OPENAI_ENDPOINT: ${{ vars.AZURE_OPENAI_ENDPOINT }}
AZURE_RESOURCE_GROUP: ${{ vars.AZURE_RESOURCE_GROUP }}
APPINSIGHTS_CONNECTIONSTRING: ${{ vars.APPINSIGHTS_CONNECTIONSTRING }}
AZURE_CONTAINER_ENVIRONMENT_NAME: ${{ vars.AZURE_CONTAINER_ENVIRONMENT_NAME }}
AZURE_CONTAINER_REGISTRY_ENDPOINT: ${{ vars.AZURE_CONTAINER_REGISTRY_ENDPOINT }}
AZURE_CONTAINER_REGISTRY_NAME: ${{ vars.AZURE_CONTAINER_REGISTRY_NAME }}
AZURE_COSMOS_NAME: ${{ vars.AZURE_COSMOS_NAME }}
AZURE_EMBEDDING_NAME: ${{ vars.AZURE_EMBEDDING_NAME }}
AZURE_ENV_NAME: ${{ vars.AZURE_ENV_NAME }}
AZURE_LOCATION: ${{ vars.AZURE_LOCATION }}
AZURE_OPENAI_API_VERSION: ${{ vars.AZURE_OPENAI_API_VERSION }}
AZURE_OPENAI_CHAT_DEPLOYMENT: ${{ vars.AZURE_OPENAI_CHAT_DEPLOYMENT }}
AZURE_OPENAI_NAME: ${{ vars.AZURE_OPENAI_NAME }}
AZURE_OPENAI_RESOURCE_GROUP_LOCATION: ${{ vars.AZURE_OPENAI_RESOURCE_GROUP_LOCATION }}
AZURE_SEARCH_ENDPOINT: ${{ vars.AZURE_SEARCH_ENDPOINT }}
AZURE_SEARCH_NAME: ${{ vars.AZURE_SEARCH_NAME }}
COSMOS_CONTAINER: ${{ vars.COSMOS_CONTAINER }}
COSMOS_ENDPOINT: ${{ vars.COSMOS_ENDPOINT }}
OPENAI_TYPE: ${{ vars.OPENAI_TYPE }}
SERVICE_ACA_IMAGE_NAME: ${{ vars.SERVICE_ACA_IMAGE_NAME }}
SERVICE_ACA_NAME: ${{ vars.SERVICE_ACA_NAME }}
SERVICE_ACA_URI: ${{ vars.SERVICE_ACA_URI }}

steps:
- name: checkout repo content
uses: actions/checkout@v4 # checkout the repository content
Expand Down
5 changes: 3 additions & 2 deletions src/api/contoso_chat/chat_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ def get_response(customerId, question, chat_history):
customer = get_customer(customerId)
print("customer complete")
context = product.find_products(question)
print(context)
print("products complete")
print("getting result...")

Expand All @@ -55,9 +54,11 @@ def get_response(customerId, question, chat_history):
inputs={"question": question, "customer": customer, "documentation": context},
configuration=model_config,
)
print("result: ", result)
return {"question": question, "answer": result, "context": context}

if __name__ == "__main__":
from tracing import init_tracing

tracer = init_tracing(local_tracing=False)
get_response(4, "What hiking jackets would you recommend?", [])
#get_response(argv[1], argv[2], argv[3])
4 changes: 0 additions & 4 deletions src/api/contoso_chat/product/product.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ def generate_embeddings(queries: List[str]) -> str:
api_version=os.environ["AZURE_OPENAI_API_VERSION"],
azure_ad_token_provider=token_provider
)
print("client:", client)
embeddings = client.embeddings.create(input=queries, model="text-embedding-ada-002")
embs = [emb.embedding for emb in embeddings.data]
items = [{"item": queries[i], "embedding": embs[i]} for i in range(len(queries))]
Expand Down Expand Up @@ -79,7 +78,6 @@ def retrieve_products(items: List[Dict[str, any]], index_name: str) -> str:

def find_products(context: str) -> Dict[str, any]:
# Get product queries
print("context:", context)
model_config = {
"azure_endpoint": os.environ["AZURE_OPENAI_ENDPOINT"],
"api_version": os.environ["AZURE_OPENAI_API_VERSION"],
Expand All @@ -89,13 +87,11 @@ def find_products(context: str) -> Dict[str, any]:
configuration=model_config,
inputs={"context":context}
)
print("queries:", queries)
qs = json.loads(queries)
# Generate embeddings
items = generate_embeddings(qs)
# Retrieve products
products = retrieve_products(items, "contoso-products")
print("products:", products)
return products


Expand Down
222,756 changes: 222,753 additions & 3 deletions src/api/evaluate-chat-flow.ipynb

Large diffs are not rendered by default.

9 changes: 7 additions & 2 deletions src/api/evaluate.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,15 @@
from evaluators.custom_evals.groundedness import groundedness_evaluation
import jsonlines
import pandas as pd
from prompty.tracer import trace
from tracing import init_tracing
from contoso_chat.chat_request import get_response

# %% [markdown]
# ## Get output from data and save to results jsonl file

# %%
@trace
def load_data():
data_path = "./evaluators/data.jsonl"

Expand All @@ -22,7 +25,7 @@ def load_data():
return df

# %%

@trace
def create_response_data(df):
results = []

Expand All @@ -49,6 +52,7 @@ def create_response_data(df):
return results

# %%
@trace
def evaluate():
# Evaluate results from results file
results_path = 'result.jsonl'
Expand Down Expand Up @@ -88,6 +92,7 @@ def evaluate():
return df

# %%
@trace
def create_summary(df):
print("Evaluation summary:\n")
print(df)
Expand All @@ -105,7 +110,7 @@ def create_summary(df):
# %%
# create main function for the python script
if __name__ == "__main__":

tracer = init_tracing(local_tracing=True)
test_data_df = load_data()
response_results = create_response_data(test_data_df)
result_evaluated = evaluate()
Expand Down
4 changes: 3 additions & 1 deletion src/api/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,14 @@
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from tracing import init_tracing

from contoso_chat.chat_request import get_response

base = Path(__file__).resolve().parent

load_dotenv()
tracer = init_tracing()

app = FastAPI()

Expand All @@ -24,7 +26,7 @@
origin_5173 = f"https://{code_space}-5173.app.github.dev"
ingestion_endpoint = app_insights.split(';')[1].split('=')[1]

origins = [origin_8000, origin_5173, os.getenv("API_SERVICE_ACA_URI"), os.getenv("WEB_SERVICE_ACA_URI"), ingestion_endpoint]
origins = [origin_8000, origin_5173, os.getenv("SERVICE_ACA_URI")]
else:
origins = [
o.strip()
Expand Down
64 changes: 64 additions & 0 deletions src/api/tracing.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
import os
import json
import logging
import contextlib
from typing import AsyncIterator, List
from prompty.tracer import Tracer, PromptyTracer
from opentelemetry import trace as oteltrace
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.sampling import ParentBasedTraceIdRatio
from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter

# Name under which spans are created via opentelemetry's get_tracer();
# shared by trace_span() and the tracer returned from init_tracing().
_tracer = "prompty"

@contextlib.contextmanager
def trace_span(name: str):
    """Open an OpenTelemetry span named *name* and yield a recorder.

    Yields:
        verbose_trace(key, value): records *value* as attribute(s) on the
        span, flattening nested dicts and list/tuple values into dotted
        attribute names (e.g. ``inputs.question``, ``documentation.0``).
    """
    tracer = oteltrace.get_tracer(_tracer)
    with tracer.start_as_current_span(name) as span:
        def verbose_trace(key, value):
            if isinstance(value, dict):
                for k, v in value.items():
                    verbose_trace(f"{key}.{k}", v)
            elif isinstance(value, (list, tuple)):
                # Fix: previously the attribute name was the bare element
                # index (losing the parent key), so items from different
                # lists collided and nested dicts inside lists were never
                # flattened. Recurse with the full dotted path instead.
                for index, item in enumerate(value):
                    verbose_trace(f"{key}.{index}", item)
            else:
                span.set_attribute(f"{key}", value)
        yield verbose_trace


def init_tracing(local_tracing: bool = False):
    """Initialize tracing for the application.

    Args:
        local_tracing: When True, register Prompty's local ``PromptyTracer``
            (for local debugging). When False (the default, used by the API
            service), register the OpenTelemetry ``trace_span`` tracer.

    Returns:
        An OpenTelemetry tracer obtained via ``oteltrace.get_tracer``.

    Side effects:
        Registers a Prompty tracer, sets the global OpenTelemetry tracer
        provider, and — when ``APPINSIGHTS_CONNECTIONSTRING`` is configured —
        attaches an Azure Monitor span exporter.
    """
    if local_tracing:
        local_trace = PromptyTracer()
        Tracer.add("PromptyTracer", local_trace.tracer)
    else:
        Tracer.add("OpenTelemetry", trace_span)

    azmon_logger = logging.getLogger("azure")
    azmon_logger.setLevel(logging.INFO)

    # Always-on sampler; exporting to Azure Monitor is optional below.
    oteltrace.set_tracer_provider(TracerProvider(sampler=ParentBasedTraceIdRatio(1.0)))

    app_insights = os.getenv("APPINSIGHTS_CONNECTIONSTRING")
    if app_insights:
        oteltrace.get_tracer_provider().add_span_processor(
            BatchSpanProcessor(
                AzureMonitorTraceExporter(connection_string=app_insights)
            )
        )
    else:
        # AzureMonitorTraceExporter raises on a missing/empty connection
        # string; skip the exporter so local runs still trace in-process.
        azmon_logger.warning(
            "APPINSIGHTS_CONNECTIONSTRING is not set; "
            "spans will not be exported to Azure Monitor."
        )

    return oteltrace.get_tracer(_tracer)
Loading