Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Drop loguru and use builtin logging #1133

Merged
merged 3 commits into from
Oct 29, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 1 addition & 33 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

10 changes: 10 additions & 0 deletions private_gpt/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,11 @@
"""private-gpt."""
import logging

# Set to 'DEBUG' to have extensive logging turned on, even for libraries
ROOT_LOG_LEVEL = "INFO"

PRETTY_LOG_FORMAT = (
"%(asctime)s.%(msecs)03d [%(levelname)-8s] %(name)+25s - %(message)s"
)
logging.basicConfig(level=ROOT_LOG_LEVEL, format=PRETTY_LOG_FORMAT, datefmt="%H:%M:%S")
logging.captureWarnings(True)
5 changes: 4 additions & 1 deletion private_gpt/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,7 @@
from private_gpt.main import app
from private_gpt.settings.settings import settings

uvicorn.run(app, host="0.0.0.0", port=settings.server.port)
# Set log_config=None so as not to use the uvicorn logging configuration, and
# use ours instead. For reference, see below:
# https://github.com/tiangolo/fastapi/discussions/7457#discussioncomment-5141108
uvicorn.run(app, host="0.0.0.0", port=settings.server.port, log_config=None)
5 changes: 4 additions & 1 deletion private_gpt/components/llm/custom/sagemaker.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

import io
import json
import logging
from typing import TYPE_CHECKING, Any

import boto3 # type: ignore
Expand All @@ -26,6 +27,8 @@
CompletionResponseGen,
)

logger = logging.getLogger(__name__)


class LineIterator:
r"""A helper class for parsing the byte stream input from TGI container.
Expand Down Expand Up @@ -81,7 +84,7 @@ def __next__(self) -> Any:
continue
raise
if "PayloadPart" not in chunk:
print("Unknown event type:" + chunk)
logger.warning("Unknown event type=%s", chunk)
continue
self.buffer.seek(0, io.SEEK_END)
self.buffer.write(chunk["PayloadPart"]["Bytes"])
Expand Down
17 changes: 0 additions & 17 deletions private_gpt/main.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
"""FastAPI app creation, logger configuration and main API routes."""
import sys
from typing import Any

import llama_index
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from loguru import logger

from private_gpt.paths import docs_path
from private_gpt.server.chat.chat_router import chat_router
Expand All @@ -16,21 +14,6 @@
from private_gpt.server.ingest.ingest_router import ingest_router
from private_gpt.settings.settings import settings

# Remove pre-configured logging handler
logger.remove(0)
# Create a new logging handler same as the pre-configured one but with the extra
# attribute `request_id`
logger.add(
sys.stdout,
level="INFO",
format=(
"<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
"<level>{level: <8}</level> | "
"<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | "
"ID: {extra[request_id]} - <level>{message}</level>"
),
)

# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")

Expand Down
5 changes: 4 additions & 1 deletion private_gpt/settings/settings_loader.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import functools
import logging
import os
import sys
from pathlib import Path
Expand All @@ -9,6 +10,8 @@
from private_gpt.constants import PROJECT_ROOT_PATH
from private_gpt.settings.yaml import load_yaml_with_envvars

logger = logging.getLogger(__name__)

_settings_folder = os.environ.get("PGPT_SETTINGS_FOLDER", PROJECT_ROOT_PATH)

# if running in unittest, use the test profile
Expand Down Expand Up @@ -41,7 +44,7 @@ def load_profile(profile: str) -> dict[str, Any]:

def load_active_profiles() -> dict[str, Any]:
    """Load every active settings profile and deep-merge them in order.

    Later profiles override earlier ones; returns the merged mapping.
    """
    logger.info("Starting application with profiles=%s", active_profiles)
    merged: dict[str, Any] = {}
    for profile in active_profiles:
        merged = deep_update(merged, load_profile(profile))
    return merged
1 change: 0 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ authors = ["Zylon <[email protected]>"]
[tool.poetry.dependencies]
python = ">=3.11,<3.12"
fastapi = { extras = ["all"], version = "^0.103.1" }
loguru = "^0.7.2"
boto3 = "^1.28.56"
injector = "^0.21.0"
pyyaml = "^6.0.1"
Expand Down
Loading