Add FastAPI app and Docker configuration
- .dockerignore +7 -0
- Dockerfile +20 -0
- app/__init__.py +0 -0
- app/__pycache__/__init__.cpython-311.pyc +0 -0
- app/__pycache__/classifier.cpython-311.pyc +0 -0
- app/__pycache__/main.cpython-311.pyc +0 -0
- app/__pycache__/model_utils.cpython-311.pyc +0 -0
- app/classifier.py +18 -0
- app/main.py +47 -0
- app/model_utils.py +17 -0
- requirements.txt +5 -0
.dockerignore
ADDED
@@ -0,0 +1,7 @@
+__pycache__
+*.pyc
+*.pyo
+*.pyd
+.Python
+env/
+venv/
Dockerfile
ADDED
@@ -0,0 +1,20 @@
+# Use an official Python runtime as a parent image
+FROM python:3.9-slim
+
+# Set the working directory in the container
+WORKDIR /app
+
+# Copy the current directory contents into the container at /app
+COPY . /app
+
+# Install any needed packages specified in requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Make port 8000 available to the world outside this container
+EXPOSE 8000
+
+# Define environment variable
+ENV PYTHONUNBUFFERED=1
+
+# Run uvicorn server
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
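With this Dockerfile in place, a typical local workflow (the image tag below is illustrative, not something defined in this commit) is to build and run from the repository root:

    docker build -t mental-health-space .
    docker run -p 8000:8000 mental-health-space

Note that EXPOSE is documentation only; it is the -p flag that actually publishes the container's port 8000 on the host.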
app/__init__.py
ADDED
File without changes
app/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (167 Bytes)
app/__pycache__/classifier.cpython-311.pyc
ADDED
Binary file (1.39 kB)
app/__pycache__/main.cpython-311.pyc
ADDED
Binary file (2.89 kB)
app/__pycache__/model_utils.cpython-311.pyc
ADDED
Binary file (1.63 kB)
app/classifier.py
ADDED
@@ -0,0 +1,18 @@
+from sklearn.linear_model import LogisticRegression
+from sklearn.feature_extraction.text import TfidfVectorizer
+
+def train_classifier(dummy_data):
+    vectorizer = TfidfVectorizer()
+    train_texts, train_labels = zip(*dummy_data)
+    train_vectors = vectorizer.fit_transform(train_texts)
+    classifier = LogisticRegression()
+    classifier.fit(train_vectors, train_labels)
+    return classifier, vectorizer
+
+def classify_text(text: str, classifier, vectorizer) -> str:
+    try:
+        transformed_data = vectorizer.transform([text])
+        category = classifier.predict(transformed_data)[0]
+        return category
+    except Exception as e:
+        return str(e)
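Taken on its own, app/classifier.py can be exercised directly. A minimal sketch, assuming it is run from the repository root so the app package is importable; the sample pairs here are hypothetical:

    from app.classifier import train_classifier, classify_text

    samples = [
        ("I feel very sad and hopeless.", "Depression"),
        ("I am constantly worrying about everything.", "Anxiety"),
    ]
    classifier, vectorizer = train_classifier(samples)
    # New text is vectorized with the same fitted TF-IDF vectorizer, then predicted.
    print(classify_text("I worry about everything all day.", classifier, vectorizer))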
app/main.py
ADDED
@@ -0,0 +1,47 @@
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from app.model_utils import load_model_and_tokenizer, generate_summary
+from app.classifier import train_classifier, classify_text
+
+app = FastAPI()
+
+# Load model and tokenizer for the /rag endpoint
+model_name = "sshleifer/distilbart-cnn-6-6"  # Example model
+model, tokenizer = load_model_and_tokenizer(model_name)
+
+# Dummy data and classifier for the /classification endpoint
+dummy_data = [
+    ("I feel very sad and hopeless.", "Depression"),
+    ("I have trouble sleeping at night.", "Insomnia"),
+    ("I am constantly worrying about everything.", "Anxiety"),
+    ("I feel energetic and happy.", "Happiness"),
+    ("My mood swings a lot and I feel irritable.", "Mood Disorder")
+]
+
+classifier, vectorizer = train_classifier(dummy_data)
+
+class Prompt(BaseModel):
+    prompt: str
+
+class ClassificationInput(BaseModel):
+    data: str
+
+@app.post("/rag")
+def rag_endpoint(prompt: Prompt):
+    try:
+        summary = generate_summary(prompt.prompt, model, tokenizer)
+        return {"summary": summary}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+@app.post("/classification")
+def classification_endpoint(input: ClassificationInput):
+    try:
+        category = classify_text(input.data, classifier, vectorizer)
+        return {"category": category}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8000)
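Once the server is up (via Docker as above, or python -m app.main from the repository root), both endpoints accept JSON bodies matching the Prompt and ClassificationInput models. A standard-library sketch, assuming the server is listening on localhost:8000; post_json is a hypothetical helper, not part of the app:

    import json
    import urllib.request

    def post_json(path: str, payload: dict) -> dict:
        # POST the payload as JSON and decode the JSON response.
        req = urllib.request.Request(
            f"http://localhost:8000{path}",
            data=json.dumps(payload).encode("utf-8"),
            headers={"Content-Type": "application/json"},
        )
        with urllib.request.urlopen(req) as resp:
            return json.loads(resp.read())

    print(post_json("/rag", {"prompt": "How can I manage anxiety?"}))         # {"summary": ...}
    print(post_json("/classification", {"data": "I can't sleep at night."}))  # {"category": ...}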
app/model_utils.py
ADDED
@@ -0,0 +1,17 @@
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+def load_model_and_tokenizer(model_name: str):
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+    return model, tokenizer
+
+def generate_summary(prompt: str, model, tokenizer) -> str:
+    context_prompt = f"Provide a brief, informative article addressing the following mental health concern: {prompt}"
+
+    try:
+        inputs = tokenizer(context_prompt, return_tensors="pt", truncation=True, padding=True)
+        summary_ids = model.generate(inputs["input_ids"], max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
+        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
+        return summary
+    except Exception as e:
+        return str(e)
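app/model_utils.py can also be used without the API. A minimal sketch; the first call downloads the sshleifer/distilbart-cnn-6-6 checkpoint from the Hugging Face Hub, so it needs network access:

    from app.model_utils import load_model_and_tokenizer, generate_summary

    # Same checkpoint that app/main.py hard-codes for the /rag endpoint.
    model, tokenizer = load_model_and_tokenizer("sshleifer/distilbart-cnn-6-6")
    print(generate_summary("I have trouble sleeping at night.", model, tokenizer))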
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+fastapi
+uvicorn
+transformers
+torch
+scikit-learn