Skip to content

Commit

Permalink
Update poetry lock (#1209)
Browse files Browse the repository at this point in the history
* Update the version of llama_index used to fix transient openai errors

* Update poetry.lock file

* Make `local` mode the default mode
  • Loading branch information
lopagela authored Nov 11, 2023
1 parent a22969a commit a579c9b
Show file tree
Hide file tree
Showing 14 changed files with 314 additions and 269 deletions.
517 changes: 273 additions & 244 deletions poetry.lock

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions private_gpt/components/vector_store/batched_chroma.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,12 +44,12 @@ def __init__(
)
self.chroma_client = chroma_client

def add(self, nodes: list[BaseNode]) -> list[str]:
def add(self, nodes: list[BaseNode], **add_kwargs: Any) -> list[str]:
"""Add nodes to index, batching the insertion to avoid issues.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
add_kwargs: _
"""
if not self.chroma_client:
raise ValueError("Client not initialized")
Expand Down
7 changes: 5 additions & 2 deletions private_gpt/components/vector_store/vector_store_component.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,11 @@ def __init__(self) -> None:
"make_this_parameterizable_per_api_call"
) # TODO

self.vector_store = BatchedChromaVectorStore(
chroma_client=chroma_client, chroma_collection=chroma_collection
self.vector_store = typing.cast(
VectorStore,
BatchedChromaVectorStore(
chroma_client=chroma_client, chroma_collection=chroma_collection
),
)

@staticmethod
Expand Down
7 changes: 4 additions & 3 deletions private_gpt/open_ai/openai_models.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import time
import uuid
from collections.abc import Iterator
from typing import Literal

from llama_index.llms import ChatResponse, CompletionResponse
from pydantic import BaseModel, Field
Expand All @@ -21,7 +22,7 @@ class OpenAIMessage(BaseModel):
(providing a default response, not AI generated).
"""

role: str = Field(default="user", enum=["assistant", "system", "user"])
role: Literal["assistant", "system", "user"] = Field(default="user")
content: str | None


Expand All @@ -46,9 +47,9 @@ class OpenAICompletion(BaseModel):
"""

id: str
object: str = Field("completion", enum=["completion", "completion.chunk"])
object: Literal["completion", "completion.chunk"] = Field(default="completion")
created: int = Field(..., examples=[1623340000])
model: str = Field(enum=["private-gpt"])
model: Literal["private-gpt"]
choices: list[OpenAIChoice]

@classmethod
Expand Down
6 changes: 4 additions & 2 deletions private_gpt/server/chunks/chunks_router.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from typing import Literal

from fastapi import APIRouter
from pydantic import BaseModel, Field

Expand All @@ -16,8 +18,8 @@ class ChunksBody(BaseModel):


class ChunksResponse(BaseModel):
object: str = Field(enum=["list"])
model: str = Field(enum=["private-gpt"])
object: Literal["list"]
model: Literal["private-gpt"]
data: list[Chunk]


Expand Down
4 changes: 2 additions & 2 deletions private_gpt/server/chunks/chunks_service.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Literal

from injector import inject, singleton
from llama_index import ServiceContext, StorageContext, VectorStoreIndex
Expand All @@ -19,7 +19,7 @@


class Chunk(BaseModel):
object: str = Field(enum=["context.chunk"])
object: Literal["context.chunk"]
score: float = Field(examples=[0.023])
document: IngestedDoc
text: str = Field(examples=["Outbound sales increased 20%, driven by new leads."])
Expand Down
8 changes: 5 additions & 3 deletions private_gpt/server/embeddings/embeddings_router.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from typing import Literal

from fastapi import APIRouter
from pydantic import BaseModel, Field
from pydantic import BaseModel

from private_gpt.di import root_injector
from private_gpt.server.embeddings.embeddings_service import (
Expand All @@ -15,8 +17,8 @@ class EmbeddingsBody(BaseModel):


class EmbeddingsResponse(BaseModel):
object: str = Field(enum=["list"])
model: str = Field(enum=["private-gpt"])
object: Literal["list"]
model: Literal["private-gpt"]
data: list[Embedding]


Expand Down
4 changes: 3 additions & 1 deletion private_gpt/server/embeddings/embeddings_service.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from typing import Literal

from injector import inject, singleton
from pydantic import BaseModel, Field

Expand All @@ -6,7 +8,7 @@

class Embedding(BaseModel):
index: int
object: str = Field(enum=["embedding"])
object: Literal["embedding"]
embedding: list[float] = Field(examples=[[0.0023064255, -0.009327292]])


Expand Down
4 changes: 3 additions & 1 deletion private_gpt/server/health/health_router.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
from typing import Literal

from fastapi import APIRouter
from pydantic import BaseModel, Field

health_router = APIRouter()


class HealthResponse(BaseModel):
status: str = Field(enum=["ok"])
status: Literal["ok"] = Field(default="ok")


@health_router.get("/health", tags=["Health"])
Expand Down
8 changes: 5 additions & 3 deletions private_gpt/server/ingest/ingest_router.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from typing import Literal

from fastapi import APIRouter, HTTPException, UploadFile
from pydantic import BaseModel, Field
from pydantic import BaseModel

from private_gpt.di import root_injector
from private_gpt.server.ingest.ingest_service import IngestedDoc, IngestService
Expand All @@ -8,8 +10,8 @@


class IngestResponse(BaseModel):
object: str = Field(enum=["list"])
model: str = Field(enum=["private-gpt"])
object: Literal["list"]
model: Literal["private-gpt"]
data: list[IngestedDoc]


Expand Down
4 changes: 2 additions & 2 deletions private_gpt/server/ingest/ingest_service.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import logging
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Any, AnyStr
from typing import TYPE_CHECKING, Any, AnyStr, Literal

from injector import inject, singleton
from llama_index import (
Expand Down Expand Up @@ -40,7 +40,7 @@


class IngestedDoc(BaseModel):
object: str = Field(enum=["ingest.document"])
object: Literal["ingest.document"]
doc_id: str = Field(examples=["c202d5e6-7b69-4869-81cc-dd574ee8ee11"])
doc_metadata: dict[str, Any] | None = Field(
examples=[
Expand Down
4 changes: 3 additions & 1 deletion private_gpt/settings/settings.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from typing import Literal

from pydantic import BaseModel, Field

from private_gpt.settings.settings_loader import load_active_profiles
Expand Down Expand Up @@ -57,7 +59,7 @@ class DataSettings(BaseModel):


class LLMSettings(BaseModel):
mode: str = Field(enum=["local", "open_ai", "sagemaker", "mock"])
mode: Literal["local", "open_ai", "sagemaker", "mock"]


class LocalSettings(BaseModel):
Expand Down
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ injector = "^0.21.0"
pyyaml = "^6.0.1"
python-multipart = "^0.0.6"
pypdf = "^3.16.2"
llama-index = "0.8.47"
llama-index = "0.8.67"
chromadb = "^0.4.13"
watchdog = "^3.0.0"

Expand Down Expand Up @@ -138,4 +138,4 @@ asyncio_mode = "auto"
testpaths = ["tests"]
addopts = [
"--import-mode=importlib",
]
]
2 changes: 1 addition & 1 deletion settings.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ ui:
path: /

llm:
mode: mock
mode: local

local:
llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.1-GGUF
Expand Down

0 comments on commit a579c9b

Please sign in to comment.