Merge pull request #195 from StephanAkkerman/feat/backend-overhaul
Backend overhaul
StephanAkkerman authored Jan 5, 2025
2 parents 17452d0 + f8be519 commit c870bc1
Showing 108 changed files with 494 additions and 874 deletions.
18 changes: 9 additions & 9 deletions .github/ISSUE_TEMPLATE/feature_request.md
@@ -6,22 +6,22 @@ labels: ''
assignees: ''

---
<!-- Note: ignore the text in between the <!- and -> markers. They are comments and will not be displayed in the final issue. -->

1. Description:

- Problem:
<!-- Briefly describe the issue or gap that needs to be addressed. -->
- Problem: <!-- Briefly describe the issue or gap that needs to be addressed. -->

- Solution:
<!-- Outline the proposed action or changes to resolve the problem. If none yet, leave blank -->

- Prerequisites:
<!-- List any requirements or dependencies needed before starting. -->
- Solution: <!-- Outline the proposed action or changes to resolve the problem. If none yet, leave blank -->

2. Tasks:

- Prerequisites: <!-- List any requirements or dependencies needed before starting. -->


2. Tasks: <!-- List the tasks that need to be completed to implement the feature. -->
- [ ] Task 1
- [ ] Task 2
- [ ] Task 3

3. Additional context
<!-- Add any other context or screenshots about the feature request here. -->
3. Additional context <!-- Add any other context or screenshots about the feature request here. -->
3 changes: 1 addition & 2 deletions .github/workflows/pytests.yaml
@@ -7,8 +7,6 @@ on:
- 'requirements.txt' # Dependency file
- 'setup.py' # Setup script
- 'pyproject.toml' # Modern Python project configuration
# Add any other relevant paths as needed

permissions:
contents: read

@@ -26,6 +24,7 @@ jobs:

- name: Install dependencies
run: |
cd backend
python -m pip install --upgrade pip
pip install pytest pytest-mock
pip install -e .
6 changes: 3 additions & 3 deletions README.md
@@ -53,7 +53,7 @@ Before starting, make sure you have the following requirements:
We have bundled all required dependencies into a package for easy installation. To get started, simply run one of the following commands:

```bash
pip install .
pip install backend/.
```

or install directly from the repository:
@@ -81,7 +81,7 @@ If you prefer to build from source, follow these steps:
3. Install the dependencies:

```bash
pip install -r requirements.txt
pip install -r backend/requirements.txt
```

### Install with GPU Support (Recommended)
@@ -90,7 +90,7 @@ If you would like to run the code on a GPU, you can install the `torch` package
After installing the required dependencies, run the following command:

```bash
pip install -r requirements/gpu.txt
pip install -r backend/gpu-requirements.txt
```

## Usage ⌨️
File renamed without changes.
47 changes: 47 additions & 0 deletions backend/fluentai/api/app.py
@@ -0,0 +1,47 @@
import argparse

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from fluentai.api.routes.anki import anki_router
from fluentai.api.routes.create_card import create_card_router
from fluentai.utils.load_models import download_all_models

# Initialize FastAPI app
app = FastAPI()

# Configure CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=[
"http://localhost:3000",
"https://akkerman.ai",
],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)

app.include_router(anki_router)
app.include_router(create_card_router)


def main():
"""Start the FastAPI application."""
# Start by downloading all models
download_all_models()

parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--host", type=str, default="127.0.0.1", help="Hosting default: 127.0.0.1"
)
parser.add_argument("--port", type=int, default=8000)

args = parser.parse_args()

uvicorn.run("app:app", host=args.host, port=args.port)


if __name__ == "__main__":
main()
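
The entrypoint above wires both routers behind a single CORS-configured app. As a usage sketch (assuming the server was started with the defaults above, so it listens on 127.0.0.1:8000), the mounted routes can be exercised with any HTTP client:

```python
import httpx

# Query the supported-languages route mounted via create_card_router.
# The response shape depends on the configured language JSON.
response = httpx.get("http://127.0.0.1:8000/create_card/supported_languages")
response.raise_for_status()
print(response.json()["languages"])
```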
51 changes: 51 additions & 0 deletions backend/fluentai/api/routes/anki.py
@@ -0,0 +1,51 @@
import httpx
from fastapi import APIRouter, Request
from fastapi.responses import JSONResponse

anki_router = APIRouter()


@anki_router.post("/api/anki")
async def anki_proxy(request: Request):
"""
Proxy API endpoint for forwarding requests to the Anki server.
This function receives a JSON request from the client, forwards it to the Anki
server running on localhost, and returns the response back to the client.
HACK: This uses the backend as a proxy for when the frontend is deployed in GH Pages
Parameters
----------
request : Request
The incoming HTTP request object containing the JSON payload to be forwarded.
Returns
-------
JSONResponse
A JSON response containing the Anki server response or an error message if
the request fails.
"""
try:
# Forward the incoming request body to the Anki server
request_body = await request.json()

async with httpx.AsyncClient() as client:
response = await client.post(
"http://127.0.0.1:8765", # Assuming Anki is running on localhost with default port
json=request_body,
)

# Return the JSON response from Anki server
return JSONResponse(content=response.json(), status_code=response.status_code)

except httpx.RequestError as e:
return JSONResponse(
content={"error": "Failed to connect to Anki server.", "details": str(e)},
status_code=500,
)
except Exception as e:
return JSONResponse(
content={"error": "An unexpected error occurred.", "details": str(e)},
status_code=500,
)
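
As a usage sketch, a frontend deployed on GitHub Pages can reach a user's local AnkiConnect instance through this proxy. The payload below follows the standard AnkiConnect request convention (an `action` plus a protocol `version`); the deck list in the comment is illustrative:

```python
import httpx

# Forward an AnkiConnect-style request through the backend proxy.
payload = {"action": "deckNames", "version": 6}
response = httpx.post("http://127.0.0.1:8000/api/anki", json=payload)
print(response.json())  # e.g. {"result": ["Default"], "error": None}
```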
backend/fluentai/api/routes/create_card.py
@@ -1,33 +1,16 @@
import argparse
import base64
import os

import httpx
import uvicorn
from constants.languages import G2P_LANGCODES, G2P_LANGUAGES
from fastapi import FastAPI, HTTPException, Query, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi import APIRouter, HTTPException, Query
from fastapi.responses import JSONResponse
from pydantic import BaseModel

from fluentai.services.card_gen.constants.config import config
from fluentai.services.card_gen.main import generate_mnemonic_img
from fluentai.services.card_gen.utils.load_models import download_all_models
from fluentai.services.card_gen.utils.logger import logger
from fluentai.constants.config import config
from fluentai.constants.languages import G2P_LANGCODES, G2P_LANGUAGES
from fluentai.logger import logger
from fluentai.run import MnemonicPipeline

app = FastAPI()

# Allow all origins for development (adjust in production)
app.add_middleware(
CORSMiddleware,
allow_origins=[
"http://localhost:3000",
"https://akkerman.ai",
], # Replace "*" with your front-end URL in production
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
create_card_router = APIRouter()


# Define Pydantic models for request and responses
@@ -41,7 +24,7 @@ class CreateCardResponse(BaseModel):
recording: str = None # Placeholder for future implementation


@app.post("/create_card/word_data", response_model=CreateCardResponse)
@create_card_router.post("/create_card/word_data", response_model=CreateCardResponse)
async def api_generate_mnemonic(request: CreateCardRequest) -> dict:
"""
Calls the main function to generate a mnemonic for a given word and language code.
@@ -85,7 +68,7 @@ async def api_generate_mnemonic(request: CreateCardRequest) -> dict:
raise HTTPException(status_code=500, detail="Internal Server Error")


@app.get("/create_card/img")
@create_card_router.get("/create_card/img")
async def get_image(
word: str = Query(...),
language_code: str = Query(...),
@@ -127,9 +110,13 @@ async def get_image(
if language_code not in G2P_LANGUAGES:
raise HTTPException(status_code=400, detail="Invalid language code")

mnemonic_pipe = MnemonicPipeline()

try:
image_path, verbal_cue, translation, tts_path, ipa = generate_mnemonic_img(
word, language_code, llm_model, image_model, keyword, key_sentence
image_path, verbal_cue, translation, tts_path, ipa = (
await mnemonic_pipe.generate_mnemonic_img(
word, language_code, llm_model, image_model, keyword, key_sentence
)
)

if not os.path.exists(image_path):
@@ -157,7 +144,7 @@ async def get_image(
raise HTTPException(status_code=500, detail=f"Internal Server Error: {e}")


@app.get("/create_card/supported_languages")
@create_card_router.get("/create_card/supported_languages")
async def get_supported_languages() -> JSONResponse:
"""
Returns a list of languages that the backend supports.
@@ -170,7 +157,7 @@ async def get_supported_languages() -> JSONResponse:
return JSONResponse(content={"languages": G2P_LANGCODES})


@app.get("/create_card/image_models")
@create_card_router.get("/create_card/image_models")
async def get_image_models() -> JSONResponse:
"""
Returns a list of available image generation models, with the recommended model at the top.
@@ -191,7 +178,7 @@ async def get_image_models() -> JSONResponse:
return JSONResponse(content={"models": available_models})


@app.get("/create_card/llm_models")
@create_card_router.get("/create_card/llm_models")
async def get_llm_models() -> JSONResponse:
"""
Returns a list of available LLM models, with the recommended model at the top.
@@ -211,65 +198,3 @@ async def get_llm_models() -> JSONResponse:
available_models = [recommended_model] + models["all"]

return JSONResponse(content={"models": available_models})


# HACK: This uses the backend as a proxy for when the frontend is deployed in GH Pages


@app.post("/api/anki")
async def anki_proxy(request: Request):
"""
Proxy API endpoint for forwarding requests to the Anki server.
This function receives a JSON request from the client, forwards it to the Anki
server running on localhost, and returns the response back to the client.
Parameters
----------
request : Request
The incoming HTTP request object containing the JSON payload to be forwarded.
Returns
-------
JSONResponse
A JSON response containing the Anki server response or an error message if
the request fails.
"""
try:
# Forward the incoming request body to the Anki server
request_body = await request.json()

async with httpx.AsyncClient() as client:
response = await client.post(
"http://127.0.0.1:8765", # Assuming Anki is running on localhost with default port
json=request_body,
)

# Return the JSON response from Anki server
return JSONResponse(content=response.json(), status_code=response.status_code)

except httpx.RequestError as e:
return JSONResponse(
content={"error": "Failed to connect to Anki server.", "details": str(e)},
status_code=500,
)
except Exception as e:
return JSONResponse(
content={"error": "An unexpected error occurred.", "details": str(e)},
status_code=500,
)


if __name__ == "__main__":
# Start by downloading all models
download_all_models()

parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--host", type=str, default="127.0.0.1", help="Hosting default: 127.0.0.1"
)
parser.add_argument("--port", type=int, default=8000)

args = parser.parse_args()

uvicorn.run("api:app", host=args.host, port=args.port)
File renamed without changes.
File renamed without changes.
7 changes: 7 additions & 0 deletions backend/fluentai/constants/languages.py
@@ -0,0 +1,7 @@
import json

from fluentai.constants.config import config

with open(config.get("G2P").get("LANGUAGE_JSON")) as f:
G2P_LANGCODES = json.load(f)
G2P_LANGUAGES: dict = dict(map(reversed, G2P_LANGCODES.items()))
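
The `dict(map(reversed, ...))` line inverts the mapping so lookups work in both directions: `G2P_LANGCODES` appears to map language names to codes, and `G2P_LANGUAGES` maps codes back to names. A minimal illustration, with made-up entries standing in for the real `LANGUAGE_JSON` contents:

```python
# Hypothetical sample; real entries come from config.get("G2P").get("LANGUAGE_JSON").
langcodes = {"Dutch": "dut", "English": "eng-us"}
languages = dict(map(reversed, langcodes.items()))
print(languages)  # {'dut': 'Dutch', 'eng-us': 'English'}
```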
backend/fluentai/logger.py
@@ -2,7 +2,7 @@
import os
import sys

from fluentai.services.card_gen.constants.config import config
from fluentai.constants.config import config


class UTF8StreamHandler(logging.StreamHandler):