Update main.py
main.py
CHANGED
@@ -1,7 +1,7 @@
 #pip install fastapi
 #uvicorn main:app --reload
 #import gradio as gr
-
+import torch
 from transformers import pipeline
 from fastapi import FastAPI
 
@@ -13,6 +13,22 @@ generator = pipeline("text-generation", model="TheBloke/zephyr-7B-alpha-GGUF")
 #model = AutoModel.from_pretrained("TheBloke/zephyr-7B-alpha-GGUF")
 
 
+
+pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha", torch_dtype=torch.bfloat16, device_map="auto")
+
+# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
+messages = [
+    {
+        "role": "system",
+        "content": "You are a Spiritual Coach who always responds in the most profound and poetic style",
+    },
+    {"role": "user", "content": "What is Life?"},
+]
+prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+outputs = pipe(prompt, max_new_tokens=2560, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+print(outputs[0]["generated_text"])
+
+
 @app.get("/")
 async def root():
     return {"message": "Hello World"}
@@ -21,4 +37,5 @@ async def root():
 @app.post("/predict")
 async def root(text):
     #return {"message": "Hello World"}
-    return generator(text,max_length=2560, num_return_sequences=1)
+    #return generator(text,max_length=2560, num_return_sequences=1)
+    return outputs[0]["generated_text"]
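For context, below is a minimal, hypothetical client sketch (not part of this commit) showing how the updated /predict route could be called once the app is started with `uvicorn main:app --reload`, as suggested by the header comments. It assumes the server is on uvicorn's default `http://127.0.0.1:8000` and that the `requests` package is available; because `text` has no type annotation or request-body model, FastAPI reads it as a required query parameter. Note that, as committed, the route returns the text generated at import time for the hard-coded "What is Life?" prompt rather than running the pipeline on `text`.

# Hypothetical usage sketch (not part of the commit): query the /predict route.
# Assumes the app is running via `uvicorn main:app --reload` and that the
# `requests` package is installed; host and port are the uvicorn defaults.
import requests

resp = requests.post(
    "http://127.0.0.1:8000/predict",
    params={"text": "What is Life?"},  # `text` is passed as a query parameter
)
resp.raise_for_status()
print(resp.json())  # JSON string containing the text generated when main.py was imported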