Use faq-canada-immigration model
- app.py: +20 −3
- requirements.txt: +6 −2
app.py
CHANGED
@@ -1,7 +1,24 @@
 from fastapi import FastAPI
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
+tokenizer = AutoTokenizer.from_pretrained("mayanklad/faq-canada-immigration-tokenizer")
+model = AutoModelForCausalLM.from_pretrained("mayanklad/faq-canada-immigration")
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 app = FastAPI()
 
-
-
-
+# Define a function to handle the GET request at `/generate`.
+# The generate() function is defined as a FastAPI route that takes a
+# string parameter called text. The function generates text based on the input using the pipeline object, and returns a JSON response
+# containing the generated text under the key "output".
+@app.get("/generate")
+def generate(text: str):
+    """
+    Using the text-generation pipeline from `transformers`, generate text
+    from the given input text. The model used is `mayanklad/faq-canada-immigration`, which
+    can be found [here](https://huggingface.co/mayanklad/faq-canada-immigration).
+    """
+    # Use the pipeline to generate text from the given input text
+    output = pipe(text)
+
+    # Return the generated text in a JSON response
+    return {"output": output[0]["generated_text"]}
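For reference, the new `/generate` route can be exercised with `requests`, which this change also pins in requirements.txt. A minimal client-side sketch, assuming the app is served locally on uvicorn's default port 8000; the URL and sample question are illustrative, not part of the diff:

```python
# Hypothetical smoke test for the /generate route added above.
import requests

resp = requests.get(
    "http://127.0.0.1:8000/generate",  # assumed local base URL
    params={"text": "How do I apply for a study permit?"},  # illustrative input
)
resp.raise_for_status()

# A text-generation pipeline returns a list of dicts, so app.py unwraps
# output[0]["generated_text"] before returning it under the key "output".
print(resp.json()["output"])
```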
requirements.txt
CHANGED
@@ -1,2 +1,6 @@
-fastapi
-
+fastapi==0.74.*
+requests==2.27.*
+uvicorn[standard]==0.17.*
+sentencepiece==0.1.*
+torch==1.11.*
+transformers==4.*
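With `uvicorn[standard]` pinned, the app can be started from the CLI (`uvicorn app:app`) or programmatically. A minimal sketch of the programmatic form; the helper file, host, and port are assumptions (7860 is the port Hugging Face Spaces conventionally expects), not part of this diff:

```python
# launch.py -- hypothetical helper, not part of this change.
import uvicorn

if __name__ == "__main__":
    # "app:app" refers to the `app` object defined in app.py.
    uvicorn.run("app:app", host="0.0.0.0", port=7860)
```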