Encode all decisions at once #21

Merged 8 commits on Dec 13, 2023
Changes from 3 commits
21 changes: 19 additions & 2 deletions poetry.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "semantic-router"
-version = "0.0.5"
+version = "0.0.6"
 description = "Super fast semantic router for AI decision making"
 authors = [
     "James Briggs <[email protected]>",
@@ -18,6 +18,7 @@ openai = "^0.28.1"
 cohere = "^4.32"
 numpy = "^1.25.2"
 pinecone-text = "^0.7.0"
+colorlog = "^6.8.0"


 [tool.poetry.group.dev.dependencies]
12 changes: 9 additions & 3 deletions semantic_router/encoders/cohere.py
@@ -15,10 +15,16 @@ def __init__(
         cohere_api_key = cohere_api_key or os.getenv("COHERE_API_KEY")
         if cohere_api_key is None:
             raise ValueError("Cohere API key cannot be 'None'.")
-        self.client = cohere.Client(cohere_api_key)
+        try:
+            self.client = cohere.Client(cohere_api_key)
+        except Exception as e:
+            raise ValueError(f"Cohere API client failed to initialize. Error: {e}")

     def __call__(self, docs: list[str]) -> list[list[float]]:
         if self.client is None:
             raise ValueError("Cohere client is not initialized.")
-        embeds = self.client.embed(docs, input_type="search_query", model=self.name)
-        return embeds.embeddings
+        try:
+            embeds = self.client.embed(docs, input_type="search_query", model=self.name)
+            return embeds.embeddings
+        except Exception as e:
+            raise ValueError(f"Cohere API call failed. Error: {e}")
15 changes: 10 additions & 5 deletions semantic_router/encoders/openai.py
@@ -2,9 +2,10 @@
 from time import sleep

 import openai
-from openai.error import RateLimitError
+from openai.error import RateLimitError, ServiceUnavailableError, OpenAIError

 from semantic_router.encoders import BaseEncoder
+from semantic_router.utils.logger import logger


 class OpenAIEncoder(BaseEncoder):
@@ -19,17 +20,21 @@ def __call__(self, docs: list[str]) -> list[list[float]]:
         vector embeddings.
         """
         res = None
-        # exponential backoff in case of RateLimitError
+        error_message = ""
+
+        # exponential backoff
         for j in range(5):
             try:
+                logger.info(f"Encoding {len(docs)} documents...")
                 res = openai.Embedding.create(input=docs, engine=self.name)
                 if isinstance(res, dict) and "data" in res:
                     break
-            except RateLimitError:
+            except (RateLimitError, ServiceUnavailableError, OpenAIError) as e:
+                logger.warning(f"Retrying in {2**j} seconds...")
                 sleep(2**j)
+                error_message = str(e)
         if not res or not isinstance(res, dict) or "data" not in res:
-            raise ValueError("Failed to create embeddings.")
+            raise ValueError(f"OpenAI API call failed. Error: {error_message}")

         # get embeddings
         embeds = [r["embedding"] for r in res["data"]]
         return embeds
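The retry loop above sleeps for 2**j seconds between attempts, i.e. 1, 2, 4, 8 and then 16 seconds across the five tries. As a standalone sketch of that pattern (a hypothetical helper, not code from this PR), the backoff logic is roughly:

import time

def with_backoff(fn, retries: int = 5):
    """Call fn(), retrying with exponential backoff (2**attempt seconds) on failure."""
    last_error = ""
    for attempt in range(retries):
        try:
            return fn()
        except Exception as e:  # the encoder narrows this to specific OpenAI errors
            last_error = str(e)
            time.sleep(2**attempt)  # waits 1s, 2s, 4s, 8s, 16s
    raise ValueError(f"API call failed after {retries} attempts. Error: {last_error}")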
34 changes: 31 additions & 3 deletions semantic_router/layer.py
@@ -4,9 +4,9 @@

 from semantic_router.encoders import (
     BaseEncoder,
+    BM25Encoder,
     CohereEncoder,
     OpenAIEncoder,
-    BM25Encoder,
 )
 from semantic_router.linear import similarity_matrix, top_scores
 from semantic_router.schema import Decision
@@ -29,8 +29,7 @@ def __init__(self, encoder: BaseEncoder, decisions: list[Decision] = []):
         # if decisions list has been passed, we initialize index now
         if decisions:
             # initialize index now
-            for decision in tqdm(decisions):
-                self._add_decision(decision=decision)
+            self._add_decisions(decisions=decisions)

     def __call__(self, text: str) -> str | None:
         results = self._query(text)
@@ -61,6 +60,32 @@ def _add_decision(self, decision: Decision):
         embed_arr = np.array(embeds)
         self.index = np.concatenate([self.index, embed_arr])

+    def _add_decisions(self, decisions: list[Decision]):
+        # create embeddings for all decisions
+        all_utterances = [
+            utterance for decision in decisions for utterance in decision.utterances
+        ]
+        embedded_utterance = self.encoder(all_utterances)
+
+        # create decision array
+        decision_names = [
+            decision.name for decision in decisions for _ in decision.utterances
+        ]
+        decision_array = np.array(decision_names)
+        self.categories = (
+            np.concatenate([self.categories, decision_array])
+            if self.categories is not None
+            else decision_array
+        )
+
+        # create utterance array (the index)
+        embed_utterance_arr = np.array(embedded_utterance)
+        self.index = (
+            np.concatenate([self.index, embed_utterance_arr])
+            if self.index is not None
+            else embed_utterance_arr
+        )
+
     def _query(self, text: str, top_k: int = 5):
         """Given some text, encodes and searches the index vector space to
         retrieve the top_k most similar records.
@@ -172,6 +197,9 @@ def _add_decision(self, decision: Decision):
         else:
             self.sparse_index = np.concatenate([self.sparse_index, sparse_embeds])

+    def _add_decisions(self, decisions: list[Decision]):
+        raise NotImplementedError
+
     def _query(self, text: str, top_k: int = 5):
         """Given some text, encodes and searches the index vector space to
         retrieve the top_k most similar records.
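A note on what _add_decisions buys: instead of one encoder call per decision, every utterance from every decision is flattened into one list and embedded as a single batch, with a parallel list of decision names so each embedding row still maps back to its decision. A standalone sketch of that flattening, using hypothetical data and plain Python (independent of the library), looks like this:

# Hypothetical decision data; in the library each Decision carries a name and a list of utterances.
decisions = {
    "politics": ["isn't politics the best thing ever", "don't you just hate the president"],
    "chitchat": ["how's the weather today?", "lovely weather today"],
}

# Flatten every utterance into one list so the encoder embeds the whole batch in one call.
all_utterances = [u for utterances in decisions.values() for u in utterances]

# Keep a parallel list of decision names, one entry per utterance, so that row i of the
# embedding index maps back to decision_names[i].
decision_names = [name for name, utterances in decisions.items() for _ in utterances]

print(all_utterances)  # four strings -> one batched encoder call instead of one per decision
print(decision_names)  # ['politics', 'politics', 'chitchat', 'chitchat']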
52 changes: 52 additions & 0 deletions semantic_router/utils/logger.py
@@ -0,0 +1,52 @@
+import logging
+
+import colorlog
+
+
+class CustomFormatter(colorlog.ColoredFormatter):
+    def __init__(self):
+        super().__init__(
+            "%(log_color)s%(asctime)s %(levelname)s %(name)s %(message)s",
+            datefmt="%Y-%m-%d %H:%M:%S",
+            log_colors={
+                "DEBUG": "cyan",
+                "INFO": "green",
+                "WARNING": "yellow",
+                "ERROR": "red",
+                "CRITICAL": "bold_red",
+            },
+            reset=True,
+            style="%",
+        )
+
+
+def add_coloured_handler(logger):
+    formatter = CustomFormatter()
+
+    console_handler = logging.StreamHandler()
+    console_handler.setFormatter(formatter)
+
+    logging.basicConfig(
+        datefmt="%Y-%m-%d %H:%M:%S",
+        format="%(log_color)s%(asctime)s %(levelname)s %(name)s %(message)s",
+        force=True,
+    )
+
+    logger.addHandler(console_handler)
+
+    return logger
+
+
+def setup_custom_logger(name):
+    logger = logging.getLogger(name)
+    logger.handlers = []
+
+    add_coloured_handler(logger)
+
+    logger.setLevel(logging.INFO)
+    logger.propagate = False
+
+    return logger
+
+
+logger = setup_custom_logger(__name__)
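Outside the encoders, the shared logger can be used directly; a small usage sketch (the output format follows the formatter above, and colours depend on the terminal):

from semantic_router.utils.logger import logger

# Messages at INFO and above are printed as "timestamp LEVEL logger-name message",
# coloured per level (green INFO, yellow WARNING, red ERROR).
logger.info("index initialized")
logger.warning("retrying request")
logger.debug("not shown")  # suppressed: the logger level is set to INFO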
66 changes: 49 additions & 17 deletions walkthrough.ipynb
@@ -34,7 +34,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!pip install -qU semantic-router==0.0.1"
+    "!pip install -qU semantic-router==0.0.6"
    ]
   },
   {
@@ -46,9 +46,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/Users/jamesbriggs/opt/anaconda3/envs/decision-layer/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      " from .autonotebook import tqdm as notebook_tqdm\n",
+      "None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\n"
+     ]
+    }
+   ],
    "source": [
     "from semantic_router.schema import Decision\n",
     "\n",
@@ -74,7 +84,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -101,15 +111,15 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
     "from semantic_router.encoders import CohereEncoder\n",
     "from getpass import getpass\n",
     "import os\n",
     "\n",
-    "os.environ[\"COHERE_API_KEY\"] = os.environ[\"COHERE_API_KEY\"] or \\\n",
+    "os.environ[\"COHERE_API_KEY\"] = os.getenv(\"COHERE_API_KEY\") or \\\n",
     " getpass(\"Enter Cohere API Key: \")\n",
     "\n",
     "encoder = CohereEncoder()"
@@ -124,11 +134,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
-    "from semantic_router import DecisionLayer\n",
+    "from semantic_router.layer import DecisionLayer\n",
     "\n",
     "dl = DecisionLayer(encoder=encoder, decisions=decisions)"
    ]
@@ -142,18 +152,40 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'politics'"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "dl(\"don't you love politics?\")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'chitchat'"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "dl(\"how's the weather today?\")"
    ]
@@ -167,7 +199,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 9,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -198,7 +230,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.11.3"
+  "version": "3.11.5"
  }
 },
 "nbformat": 4,