diff --git a/README.md b/README.md
index fb3c704..0d0d878 100644
--- a/README.md
+++ b/README.md
@@ -5,5 +5,29 @@ This API is built using **FastAPI** and is designed to perform **sentiment analy
 
 🌐 **Live Demo**: The API is deployed on **Azure Web Services** and can be accessed at the following link: [Emotion Sense API](https://emotionsense-dvemggcjagcqdxey.uaenorth-01.azurewebsites.net/)
 
-🚨 **Note**:
-Unfortunately, I can't keep the API live indefinitely as it incurs costs 💸. If the service is down or the link isn't valid anymore, feel free to clone the repository and run it locally to explore its features!
+## POST /predict
+
+This endpoint takes in a text input and returns a sentiment prediction. The sentiment is classified as `positive`, `neutral`, or `negative` based on the analysis of the text.
+
+### Request:
+
+- **URL**: `/predict`
+- **Method**: `POST`
+- **Content-Type**: `application/json`
+- **Request Body**:
+  - The request body should contain a string of text to be analyzed for sentiment.
+
+### Response:
+
+- **Content-Type**: `application/json`
+- **Body**: A JSON object containing a `prediction` field with an integer value:
+  - `0` = neutral
+  - `1` = positive
+  - `2` = negative
+
+#### Example response:
+```json
+{
+  "prediction": 1
+}
+```
diff --git a/main.py b/main.py
index b9d37c2..7e282ae 100644
--- a/main.py
+++ b/main.py
@@ -10,26 +10,17 @@
 import nltk
 import os
 import gdown
-import zipfile
 
 
+# Function to get the model and tokenizer from google drive instead of putting it in the repo
 def download_file_from_google_drive(file_id, output_path):
     url = f'https://drive.google.com/uc?id={file_id}'
     gdown.download(url, output_path, quiet=False)
 
-# Example usage:
 download_file_from_google_drive('1qWkyNQXhcwlE-enuY0suIvsOy-5oRIkQ', './COVID_NLP5.keras')
 download_file_from_google_drive('12_AgHa0hiIPLeWQy51yovk8hpN6xzlo5', './tokenizer5.joblib')
 
 
-# output = 'nltk_data.zip'
-# download_file_from_google_drive('1t5t1bL2EJr1vEY0nMs0x1l50tFZSUXLP', output)
-
-# # Extract the zip file
-# with zipfile.ZipFile(output, 'r') as zip_ref:
-#     zip_ref.extractall()  # Extract to current directory
-
-
 # Define the directory where NLTK data will be stored
 nltk_data_dir = os.path.join(os.path.dirname(__file__), "nltk_data")
 
@@ -37,11 +28,10 @@ def download_file_from_google_drive(file_id, output_path):
 
 # Add this directory to NLTK's data path
 nltk.data.path.append(nltk_data_dir)
-nltk.download('punkt',download_dir=nltk_data_dir) # At first you have to download these nltk packages.
-nltk.download('stopwords',download_dir=nltk_data_dir)
-nltk.download('wordnet',download_dir=nltk_data_dir)
-nltk.download('punkt_tab',download_dir=nltk_data_dir)
-
+nltk.download('punkt', download_dir=nltk_data_dir) # At first you have to download these nltk packages.
+nltk.download('stopwords', download_dir=nltk_data_dir)
+nltk.download('wordnet', download_dir=nltk_data_dir)
+nltk.download('punkt_tab', download_dir=nltk_data_dir)
 
 from nltk.corpus import stopwords
 from nltk.stem import WordNetLemmatizer
@@ -100,7 +90,7 @@ def nlp_preprocessing(tweet):
 
 @app.get("/")
 async def root():
-    return {"message": "Hello World"}
+    return {"message": "Welcome to Emotion Sense API"}
 
 
 class PredictRequest(BaseModel):