Big refactoring, splitting up large swiss_army_llama.py file into several new code files:

- `database_functions.py`
- `log_viewer_functions.py`
- `misc_utility_functions.py`
- `ramdisk_functions.py`
- `service_functions.py`
- `shared_resources.py`
- `uvicorn_config.py`

Also introduces Redis-based locks so that multiple Uvicorn workers can run without stepping on each other's toes.

This allows multiple concurrent clients to be served, and also lets the log viewer be used while the service is running, without waiting.

Fixes to the log viewing code.

Added uvloop to requirements.txt to speed up asyncio.
Dicklesworthstone committed Oct 2, 2023
1 parent 4f6faf9 commit deeaf8a
Showing 18 changed files with 1,478 additions and 1,250 deletions.
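
The Redis-based locking itself lives in the new `shared_resources.py`, which is collapsed in this view, so the snippet below is only a rough sketch of the pattern using redis-py's asyncio client; the key name, timeout, and helper function are illustrative assumptions rather than the actual implementation.

```python
# Hypothetical sketch only -- the real lock logic is in shared_resources.py (not shown).
# Assumes redis-py's asyncio client; key name and timeout are made up for illustration.
import asyncio
import redis.asyncio as redis

redis_client = redis.Redis(host="localhost", port=6379, db=0)

async def do_exclusive_work(worker_id: int) -> None:
    # Only one Uvicorn worker at a time gets past this point; the others block
    # on the same Redis key instead of duplicating the work.
    async with redis_client.lock("swiss_army_llama:exclusive_section", timeout=60):
        print(f"worker {worker_id} holds the lock")
        await asyncio.sleep(1)  # stand-in for the guarded critical section

async def main() -> None:
    await asyncio.gather(*(do_exclusive_work(i) for i in range(3)))

if __name__ == "__main__":
    asyncio.run(main())
```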
1 change: 1 addition & 0 deletions .env
@@ -8,6 +8,7 @@ DEFAULT_MAX_COMPLETION_TOKENS=1000
DEFAULT_NUMBER_OF_COMPLETIONS_TO_GENERATE =1
DEFAULT_COMPLETION_TEMPERATURE=0.7
LLAMA_EMBEDDING_SERVER_LISTEN_PORT=8089
UVICORN_NUMBER_OF_WORKERS=2
MINIMUM_STRING_LENGTH_FOR_DOCUMENT_EMBEDDING=15
MAX_RETRIES=10
DB_WRITE_BATCH_SIZE=25
2 changes: 2 additions & 0 deletions README.md
@@ -109,6 +109,7 @@ fast_vector_similarity
faster-whisper
textract
pytz
uvloop
```

## Running the Application
@@ -137,6 +138,7 @@ You can configure the service easily by editing the included `.env` file. Here's
- `DEFAULT_MODEL_NAME`: Default model name to use. (e.g., `yarn-llama-2-13b-128k`)
- `LLM_CONTEXT_SIZE_IN_TOKENS`: Context size in tokens for LLM. (e.g., `512`)
- `SWISS_ARMY_LLAMA_SERVER_LISTEN_PORT`: Port number for the service. (e.g., `8089`)
- `UVICORN_NUMBER_OF_WORKERS`: Number of workers for Uvicorn. (e.g., `2`)
- `MINIMUM_STRING_LENGTH_FOR_DOCUMENT_EMBEDDING`: Minimum string length for document embedding. (e.g., `15`)
- `MAX_RETRIES`: Maximum retries for locked database. (e.g., `10`)
- `DB_WRITE_BATCH_SIZE`: Database write batch size. (e.g., `25`)
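
The new `uvicorn_config.py` is not shown in this excerpt; as a hedged illustration, a setting like `UVICORN_NUMBER_OF_WORKERS` would typically be read with python-decouple (the same pattern `database_functions.py` uses below) and passed to Uvicorn roughly as in the following sketch. The import string and the uvloop flag are assumptions, not the committed code.

```python
# Illustrative sketch only -- the actual uvicorn_config.py in this commit is not shown here.
import uvicorn
from decouple import config

UVICORN_NUMBER_OF_WORKERS = config("UVICORN_NUMBER_OF_WORKERS", default=2, cast=int)
SWISS_ARMY_LLAMA_SERVER_LISTEN_PORT = config("SWISS_ARMY_LLAMA_SERVER_LISTEN_PORT", default=8089, cast=int)

if __name__ == "__main__":
    uvicorn.run(
        "swiss_army_llama:app",                  # assumed module:app import string
        host="0.0.0.0",
        port=SWISS_ARMY_LLAMA_SERVER_LISTEN_PORT,
        workers=UVICORN_NUMBER_OF_WORKERS,       # multiple workers rely on the Redis locks for shared state
        loop="uvloop",                           # uvloop was added to requirements.txt in this commit
    )
```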
162 changes: 162 additions & 0 deletions database_functions.py
@@ -0,0 +1,162 @@
from embeddings_data_models import Base, TextEmbedding, DocumentEmbedding, Document, TokenLevelEmbedding, TokenLevelEmbeddingBundle, TokenLevelEmbeddingBundleCombinedFeatureVector, AudioTranscript
from logger_config import setup_logger
import traceback
import asyncio
import random
from sqlalchemy import select
from sqlalchemy import text as sql_text
from sqlalchemy.exc import SQLAlchemyError, OperationalError, IntegrityError
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from datetime import datetime
from decouple import config

logger = setup_logger()
db_writer = None
DATABASE_URL = "sqlite+aiosqlite:///swiss_army_llama.sqlite"
MAX_RETRIES = config("MAX_RETRIES", default=3, cast=int)
DB_WRITE_BATCH_SIZE = config("DB_WRITE_BATCH_SIZE", default=25, cast=int)
RETRY_DELAY_BASE_SECONDS = config("RETRY_DELAY_BASE_SECONDS", default=1, cast=int)
JITTER_FACTOR = config("JITTER_FACTOR", default=0.1, cast=float)

engine = create_async_engine(DATABASE_URL, echo=False, connect_args={"check_same_thread": False})
AsyncSessionLocal = sessionmaker(
bind=engine,
class_=AsyncSession,
expire_on_commit=False,
autoflush=False
)
class DatabaseWriter:
def __init__(self, queue):
self.queue = queue
        self.processing_hashes = set()  # Set to store the hashes of everything that is currently being processed in the queue (to avoid duplicates of the same task being added to the queue)

def _get_hash_from_operation(self, operation):
attr_name = {
TextEmbedding: 'text_hash',
DocumentEmbedding: 'file_hash',
Document: 'document_hash',
TokenLevelEmbedding: 'token_hash',
TokenLevelEmbeddingBundle: 'input_text_hash',
TokenLevelEmbeddingBundleCombinedFeatureVector: 'combined_feature_vector_hash',
AudioTranscript: 'audio_file_hash'
}.get(type(operation))
hash_value = getattr(operation, attr_name, None)
llm_model_name = getattr(operation, 'llm_model_name', None)
return f"{hash_value}_{llm_model_name}" if hash_value and llm_model_name else None

async def initialize_processing_hashes(self, chunk_size=1000):
start_time = datetime.utcnow()
async with AsyncSessionLocal() as session:
queries = [
(select(TextEmbedding.text_hash, TextEmbedding.llm_model_name), True),
(select(DocumentEmbedding.file_hash, DocumentEmbedding.llm_model_name), True),
(select(Document.document_hash, Document.llm_model_name), True),
(select(TokenLevelEmbedding.token_hash, TokenLevelEmbedding.llm_model_name), True),
(select(TokenLevelEmbeddingBundle.input_text_hash, TokenLevelEmbeddingBundle.llm_model_name), True),
(select(TokenLevelEmbeddingBundleCombinedFeatureVector.combined_feature_vector_hash, TokenLevelEmbeddingBundleCombinedFeatureVector.llm_model_name), True),
(select(AudioTranscript.audio_file_hash), False)
]
for query, has_llm in queries:
offset = 0
while True:
result = await session.execute(query.limit(chunk_size).offset(offset))
rows = result.fetchall()
if not rows:
break
for row in rows:
if has_llm:
hash_with_model = f"{row[0]}_{row[1]}"
else:
hash_with_model = row[0]
self.processing_hashes.add(hash_with_model)
offset += chunk_size
end_time = datetime.utcnow()
total_time = (end_time - start_time).total_seconds()
if len(self.processing_hashes) > 0:
logger.info(f"Finished initializing set of input hash/llm_model_name combinations that are either currently being processed or have already been processed. Set size: {len(self.processing_hashes)}; Took {total_time} seconds, for an average of {total_time / len(self.processing_hashes)} seconds per hash.")

async def _handle_integrity_error(self, e, write_operation, session):
unique_constraint_msg = {
TextEmbedding: "token_embeddings.token_hash, token_embeddings.llm_model_name",
DocumentEmbedding: "document_embeddings.file_hash, document_embeddings.llm_model_name",
Document: "documents.document_hash, documents.llm_model_name",
TokenLevelEmbedding: "token_level_embeddings.token_hash, token_level_embeddings.llm_model_name",
TokenLevelEmbeddingBundle: "token_level_embedding_bundles.input_text_hash, token_level_embedding_bundles.llm_model_name",
AudioTranscript: "audio_transcripts.audio_file_hash"
}.get(type(write_operation))
if unique_constraint_msg and unique_constraint_msg in str(e):
logger.warning(f"Embedding already exists in the database for given input and llm_model_name: {e}")
await session.rollback()
else:
raise

async def dedicated_db_writer(self):
while True:
write_operations_batch = await self.queue.get()
async with AsyncSessionLocal() as session:
try:
for write_operation in write_operations_batch:
session.add(write_operation)
await session.flush() # Flush to get the IDs
await session.commit()
for write_operation in write_operations_batch:
hash_to_remove = self._get_hash_from_operation(write_operation)
if hash_to_remove is not None and hash_to_remove in self.processing_hashes:
self.processing_hashes.remove(hash_to_remove)
except IntegrityError as e:
await self._handle_integrity_error(e, write_operation, session)
except SQLAlchemyError as e:
logger.error(f"Database error: {e}")
await session.rollback()
except Exception as e:
tb = traceback.format_exc()
logger.error(f"Unexpected error: {e}\n{tb}")
await session.rollback()
self.queue.task_done()

async def enqueue_write(self, write_operations):
write_operations = [op for op in write_operations if self._get_hash_from_operation(op) not in self.processing_hashes] # Filter out write operations for hashes that are already being processed
if not write_operations: # If there are no write operations left after filtering, return early
return
for op in write_operations: # Add the hashes of the write operations to the set
hash_value = self._get_hash_from_operation(op)
if hash_value:
self.processing_hashes.add(hash_value)
await self.queue.put(write_operations)


async def execute_with_retry(func, *args, **kwargs):
retries = 0
while retries < MAX_RETRIES:
try:
return await func(*args, **kwargs)
except OperationalError as e:
if 'database is locked' in str(e):
retries += 1
sleep_time = RETRY_DELAY_BASE_SECONDS * (2 ** retries) + (random.random() * JITTER_FACTOR) # Implementing exponential backoff with jitter
logger.warning(f"Database is locked. Retrying ({retries}/{MAX_RETRIES})... Waiting for {sleep_time} seconds")
await asyncio.sleep(sleep_time)
else:
raise
    raise OperationalError("Database is locked after multiple retries", None, None)

async def initialize_db():
logger.info("Initializing database, creating tables, and setting SQLite PRAGMAs...")
list_of_sqlite_pragma_strings = ["PRAGMA journal_mode=WAL;", "PRAGMA synchronous = NORMAL;", "PRAGMA cache_size = -1048576;", "PRAGMA busy_timeout = 2000;", "PRAGMA wal_autocheckpoint = 100;"]
list_of_sqlite_pragma_justification_strings = ["Set SQLite to use Write-Ahead Logging (WAL) mode (from default DELETE mode) so that reads and writes can occur simultaneously",
"Set synchronous mode to NORMAL (from FULL) so that writes are not blocked by reads",
"Set cache size to 1GB (from default 2MB) so that more data can be cached in memory and not read from disk; to make this 256MB, set it to -262144 instead",
"Increase the busy timeout to 2 seconds so that the database waits",
"Set the WAL autocheckpoint to 100 (from default 1000) so that the WAL file is checkpointed more frequently"]
assert(len(list_of_sqlite_pragma_strings) == len(list_of_sqlite_pragma_justification_strings))
async with engine.begin() as conn:
for pragma_string in list_of_sqlite_pragma_strings:
await conn.execute(sql_text(pragma_string))
logger.info(f"Executed SQLite PRAGMA: {pragma_string}")
logger.info(f"Justification: {list_of_sqlite_pragma_justification_strings[list_of_sqlite_pragma_strings.index(pragma_string)]}")
await conn.run_sync(Base.metadata.create_all) # Create tables if they don't exist
logger.info("Database initialization completed.")

def get_db_writer() -> DatabaseWriter:
return db_writer # Return the existing DatabaseWriter instance
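
The startup wiring that creates the queue and the `DatabaseWriter` instance is not part of this file (it presumably happens in `shared_resources.py` or the main app, neither of which is shown here), so the following is only a sketch, under that assumption, of how the pieces above fit together.

```python
# Hypothetical startup wiring; the real code for this lives outside database_functions.py.
import asyncio

import database_functions
from database_functions import DatabaseWriter, initialize_db

async def startup() -> None:
    await initialize_db()                                    # create tables and apply the SQLite PRAGMAs
    queue = asyncio.Queue()                                  # holds batches of ORM objects to persist
    database_functions.db_writer = DatabaseWriter(queue)     # module global returned by get_db_writer()
    await database_functions.db_writer.initialize_processing_hashes()
    asyncio.create_task(database_functions.db_writer.dedicated_db_writer())  # background writer loop

# Request handlers would then enqueue ORM objects instead of committing directly, e.g.:
#     await database_functions.get_db_writer().enqueue_write([some_text_embedding_instance])
# and wrap lock-sensitive coroutines with execute_with_retry(...) to get exponential backoff.
```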
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
20 changes: 10 additions & 10 deletions log_viewer_functions.py
@@ -48,7 +48,6 @@ def highlight_rules_func(text):
text = text.replace('#COLOR13_OPEN#', '<span style="color: #f2ebd3;">').replace('#COLOR13_CLOSE#', '</span>')
return text


def show_logs_incremental_func(minutes: int, last_position: int):
new_logs = []
now = datetime.now(timezone('UTC')) # get current time, make it timezone-aware
@@ -75,21 +74,22 @@ def show_logs_incremental_func(minutes: int, last_position: int):


def show_logs_func(minutes: int = 5):
# read the entire log file and generate HTML with logs up to `minutes` minutes from now
with open(log_file_path, "r") as f:
lines = f.readlines()
logs = []
now = datetime.now(timezone('UTC')) # get current time, make it timezone-aware
now = datetime.now(timezone('UTC'))
for line in lines:
if line.strip() == "":
continue
if line[0].isdigit():
log_datetime_str = line.split(" - ")[0] # assuming the datetime is at the start of each line
log_datetime = datetime.strptime(log_datetime_str, "%Y-%m-%d %H:%M:%S,%f") # parse the datetime string to a datetime object
log_datetime = log_datetime.replace(tzinfo=timezone('UTC')) # set the datetime object timezone to UTC to match `now`
if now - log_datetime <= timedelta(minutes=minutes): # if the log is within `minutes` minutes from now
logs.append(highlight_rules_func(line.rstrip('\n'))) # add the highlighted log to the list and strip any newline at the end
logs_as_string = "<br>".join(logs) # joining with <br> directly
try:
log_datetime_str = line.split(" - ")[0]
log_datetime = datetime.strptime(log_datetime_str, "%Y-%m-%d %H:%M:%S,%f")
log_datetime = log_datetime.replace(tzinfo=timezone('UTC'))
if now - log_datetime <= timedelta(minutes=minutes):
logs.append(highlight_rules_func(line.rstrip('\n')))
except (ValueError, IndexError):
logs.append(highlight_rules_func(line.rstrip('\n'))) # Line didn't meet datetime parsing criteria, continue with processing
logs_as_string = "<br>".join(logs)
logs_as_string_newlines_rendered = logs_as_string.replace("\n", "<br>")
logs_as_string_newlines_rendered_font_specified = """
<html>
35 changes: 35 additions & 0 deletions logger_config.py
@@ -0,0 +1,35 @@
import logging
import os
import shutil
import queue
from logging.handlers import RotatingFileHandler, QueueHandler, QueueListener

logger = logging.getLogger("swiss_army_llama")

def setup_logger():
if logger.handlers:
return logger
old_logs_dir = 'old_logs'
if not os.path.exists(old_logs_dir):
os.makedirs(old_logs_dir)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
log_file_path = 'swiss_army_llama.log'
log_queue = queue.Queue(-1) # Create a queue for the handlers
fh = RotatingFileHandler(log_file_path, maxBytes=10*1024*1024, backupCount=5)
fh.setFormatter(formatter)
def namer(default_log_name): # Function to move rotated logs to the old_logs directory
return os.path.join(old_logs_dir, os.path.basename(default_log_name))
def rotator(source, dest):
shutil.move(source, dest)
fh.namer = namer
fh.rotator = rotator
sh = logging.StreamHandler() # Stream handler
sh.setFormatter(formatter)
queue_handler = QueueHandler(log_queue) # Create QueueHandler
queue_handler.setFormatter(formatter)
logger.addHandler(queue_handler)
listener = QueueListener(log_queue, fh, sh) # Create QueueListener with real handlers
listener.start()
    logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)  # Configure SQLAlchemy logging
return logger
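
Other modules pick up the shared logger by calling `setup_logger()`, exactly as `database_functions.py` above does; a minimal usage sketch (the message text is illustrative):

```python
from logger_config import setup_logger

logger = setup_logger()  # idempotent: returns the already-configured logger on repeat calls
logger.info("module initialized")  # routed through the QueueHandler to the rotating file and the console
```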