From 94298cb4b2d479a10759de66009f046a7f4e553d Mon Sep 17 00:00:00 2001
From: Niklas Marion
Date: Fri, 22 Dec 2023 12:56:06 +0100
Subject: [PATCH] init

---
 .github/workflows/docker.yaml | 69 ++++++++++++++++++++++++++++
 .gitignore                    |  4 ++
 Dockerfile                    | 11 +++++
 LICENSE                       | 21 +++++++++
 main.py                       | 86 +++++++++++++++++++++++++++++++++++
 requirements.txt              |  3 ++
 taf2db.py                     | 62 +++++++++++++++++++++++++
 7 files changed, 256 insertions(+)
 create mode 100644 .github/workflows/docker.yaml
 create mode 100644 .gitignore
 create mode 100644 Dockerfile
 create mode 100644 LICENSE
 create mode 100644 main.py
 create mode 100644 requirements.txt
 create mode 100644 taf2db.py

diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml
new file mode 100644
index 0000000..74718af
--- /dev/null
+++ b/.github/workflows/docker.yaml
@@ -0,0 +1,69 @@
+name: Docker
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+permissions:
+  actions: write
+  contents: read
+  packages: write
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+  build:
+    name: 🐳 Build
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    steps:
+      - name: ⬇️ Checkout repo
+        uses: actions/checkout@v4
+        with:
+          token: ${{ github.token }}
+
+      - name: 🐳 Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: ⚡️ Cache Docker layers
+        uses: actions/cache@v3
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+      - name: 🔑 GitHub Registry Auth
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: 👀 Extract metadata
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+      - name: 🐳 Build
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          build-args: |
+            COMMIT_SHA=${{ github.sha }}
+            BRANCH=${{ github.head_ref || github.ref_name }}
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,mode=max,dest=/tmp/.buildx-cache-new
+
+      - name: 🚚 Move cache
+        run: |
+          rm -rf /tmp/.buildx-cache
+          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..34e7e04
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+*.db
+*.json
+*.csv
+__pycache__
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..603bbaf
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,11 @@
+FROM python:3.9
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+COPY . /code/app
+
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "80", "--proxy-headers"]
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..412ebb6
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Niklas Marion
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..bf20248
--- /dev/null
+++ b/main.py
@@ -0,0 +1,86 @@
+from typing import Union
+
+from fastapi import FastAPI
+import sqlite3
+from typing import Optional
+
+app = FastAPI(title="DLV", docs_url="/swagger",
+              openapi_url="/swagger-json", redoc_url=None)
+
+
+def create_connection():
+    conn = sqlite3.connect("file:stammdaten.db?mode=ro",
+                           uri=True, isolation_level='IMMEDIATE')
+    return conn
+
+
+def query_db(query, args=(), one=False):
+    cur = create_connection().cursor()
+    cur.execute(query, args)
+    r = [dict((cur.description[i][0], value)
+              for i, value in enumerate(row)) for row in cur.fetchall()]
+    cur.connection.close()
+    return (r[0] if r else None) if one else r
+
+
+@app.get("/clubs/{lv}")
+def read_clubs_by_lv(lv: str, q: Union[str, None] = None):
+    query = f"SELECT * FROM Club WHERE lv = '{lv}'"
+    clubs = query_db(query)
+    return clubs
+
+
+@app.get("/lv")
+def read_lv():
+    query = "SELECT DISTINCT lv FROM Club"
+    connection = create_connection()
+    lv = connection.execute(query).fetchall()
+    connection.close()
+    lv = [item for t in lv for item in t]
+    return lv
+
+
+@app.get("/athletes/{guid}")
+def get_athlete_by_guid(guid: str):
+    query = f"SELECT * FROM Athlete WHERE guid = '{guid}'"
+    athlete = query_db(query)
+    if len(athlete) == 0:
+        return None
+    return athlete[0]
+
+
+@app.get("/athletes")
+def get_athletes(
+    firstname: Optional[str] = None,
+    lastname: Optional[str] = None,
+    clubId: Optional[str] = None,
+    worldAthleticsId: Optional[int] = None,
+    lv: Optional[str] = None,
+    limit: Optional[int] = 100,
+    page: Optional[int] = 0,
+):
+    query = "SELECT Athlete.*,C.name as club,lv FROM Athlete JOIN main.Club C on Athlete.clubId = C.id"
+
+    conditions = []
+
+    if firstname:
+        conditions.append(f"firstname LIKE '%{firstname}%'")
+    if lastname:
+        conditions.append(f"lastname LIKE '%{lastname}%'")
+    if clubId:
+        conditions.append(f"clubId = '{clubId}'")
+    if worldAthleticsId:
+        conditions.append(f"worldAthleticsId = '{worldAthleticsId}'")
+    if lv:
+        conditions.append(f"lv = '{lv}'")
+
+    if conditions:
+        query += " WHERE " + " AND ".join(conditions)
+
+    if limit > 100:
+        limit = 100
+
+    query += f" LIMIT {limit} OFFSET {page * limit}"
+
+    athletes = query_db(query)
+    return athletes
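
Note that the handlers above interpolate request values directly into their SQL strings, which leaves them open to SQL injection. Since query_db already accepts an args tuple, the same lookups can pass bound parameters instead. The following is a minimal, illustrative sketch, not part of the patch: the function names are made up for the example, and query_db is the helper from main.py above.

# Sketch only: parameterized variants of the lookups in main.py.
# "?" placeholders let sqlite3 bind the values, so quotes or SQL fragments
# in the input cannot change the statement itself.
from main import query_db  # helper defined in main.py above


def clubs_by_lv(lv: str):
    return query_db("SELECT * FROM Club WHERE lv = ?", (lv,))


def athlete_by_guid(guid: str):
    rows = query_db("SELECT * FROM Athlete WHERE guid = ?", (guid,))
    return rows[0] if rows else None


def search_athletes(firstname=None, lastname=None, lv=None, limit=100, page=0):
    query = ("SELECT Athlete.*, C.name AS club, lv "
             "FROM Athlete JOIN Club C ON Athlete.clubId = C.id")
    conditions, args = [], []
    if firstname:
        conditions.append("firstname LIKE ?")
        args.append(f"%{firstname}%")
    if lastname:
        conditions.append("lastname LIKE ?")
        args.append(f"%{lastname}%")
    if lv:
        conditions.append("lv = ?")
        args.append(lv)
    if conditions:
        query += " WHERE " + " AND ".join(conditions)
    # limit and page are integers here, so formatting them is not injectable
    limit = min(limit, 100)
    query += f" LIMIT {limit} OFFSET {page * limit}"
    return query_db(query, tuple(args))
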
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..9483e53
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,3 @@
+pandas==2.1.4
+fastapi==0.105.0
+uvicorn[standard]==0.25.0
\ No newline at end of file
diff --git a/taf2db.py b/taf2db.py
new file mode 100644
index 0000000..5246440
--- /dev/null
+++ b/taf2db.py
@@ -0,0 +1,62 @@
+import pandas as pd
+import sqlite3
+import os
+from pathlib import Path
+import argparse
+
+def main(taf_path) -> None:
+    conn = sqlite3.connect("stammdaten.db")
+
+    clubs_path = Path(taf_path).joinpath("Settings/base_clubs.json")
+
+    with open(clubs_path, encoding="utf-16-le", errors='ignore') as f:
+        data = f.read()
+    data = data[11:-1]
+    with open('clubs.json', 'w', encoding="utf-8") as f:
+        f.write(data)
+
+    df = pd.read_json('clubs.json', encoding="utf-8-sig")
+    df = df[["LV", "Name", "ShortName", "Code", "Type"]]
+    df['Type'] = df['Type'].apply(lambda x: "CLUB" if x == 0 else "LG")
+
+    df = df.rename(columns={"LV": "lv", "Name": "name", "ShortName": "shortName", "Code": "id", "Type": "type"})
+
+    df.to_sql('Club', conn, if_exists='replace', index=False)
+    os.remove('clubs.json')
+
+    athletes_path = Path(taf_path).joinpath("Settings/base_athletes.json")
+
+    with open(athletes_path, encoding="utf-16-le", errors='ignore') as f:
+        data = f.read()
+    data = data[11:-1]
+    with open('athletes.json', 'w', encoding="utf-8") as f:
+        f.write(data)
+
+    df = pd.read_json('athletes.json', encoding="utf-8-sig")
+    df['WorldAthleticsId'] = df['WorldAthleticsId'].fillna(0)
+
+    df = df[df['Code'] != '']
+    df = df[df['ExternalId'].notnull()]
+
+    df = df[["ExternalId", "Code", "Firstname", "Lastname", "ClubCode", "Nation", "Yob", "Gender", "WorldAthleticsId"]]
+
+    df['WorldAthleticsId'] = df['WorldAthleticsId'].apply(lambda x: None if x == 0 else int(x))
+    df["Gender"] = df['Gender'].apply(lambda x: "M" if x == 0 else "W")
+
+    df = df.rename(columns={"Code": "id", "ExternalId": "guid", "Firstname": "firstname", "Lastname": "lastname", "ClubCode": "clubId", "Nation": "country", "Yob": "birthyear", "Gender": "sex", "WorldAthleticsId": "worldAthleticsId"})
+
+    df.to_sql('Athlete', conn, if_exists='replace', index=False)
+    os.remove('athletes.json')
+
+
+    conn.execute("DELETE FROM Athlete WHERE guid IN (SELECT guid FROM Athlete JOIN main.Club C on Athlete.clubId = C.id WHERE lv='')")
+    conn.execute("DELETE FROM Club WHERE lv=''")
+    conn.commit()
+    conn.close()
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--taf", required=True, dest="taf_path", help="Path to TAF folder")
+    args = parser.parse_args()
+
+    main(args.taf_path)
\ No newline at end of file
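
After running python taf2db.py --taf <path-to-TAF-folder>, the generated stammdaten.db can be sanity-checked with a few read-only queries. A small sketch, assuming the database file was written to the current working directory as taf2db.py does:

import sqlite3

# Sketch: quick sanity check of the database written by taf2db.py.
# Opens the file read-only, the same way main.py does.
conn = sqlite3.connect("file:stammdaten.db?mode=ro", uri=True)

for table in ("Club", "Athlete"):
    count = conn.execute(f"SELECT COUNT(*) FROM {table}").fetchone()[0]
    print(f"{table}: {count} rows")

# After the cleanup queries in taf2db.py, every athlete should reference an existing club.
orphans = conn.execute(
    "SELECT COUNT(*) FROM Athlete LEFT JOIN Club C ON Athlete.clubId = C.id WHERE C.id IS NULL"
).fetchone()[0]
print(f"athletes without a club: {orphans}")

conn.close()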