main.py CHANGED
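This change converts the /generate/ endpoint from buffering the whole completion to streaming it token by token: generate now yields each token's text as it arrives from text_generation, and generate_text wraps that generator in a StreamingResponse (newly imported from fastapi.responses) so chunks are flushed to the client as they are produced. The old accumulate-and-return loop is left commented out for reference.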
@@ -1,4 +1,5 @@
 from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
 from pydantic import BaseModel
 from huggingface_hub import InferenceClient
 import uvicorn
@@ -44,11 +45,13 @@ def generate(item: Item):
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
 
+    # for response in stream:
+    #     output += response.token.text
+    # return output
     for response in stream:
-        output += response.token.text
-    return output
+        yield response.token.text
 
 @app.post("/generate/")
 async def generate_text(item: Item):
-    return
+    return StreamingResponse(generate(item), media_type="text/plain")
 
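A minimal sketch of a client consuming the new streaming endpoint, assuming the Space is reachable at a hypothetical BASE_URL and that the Item model (defined outside this hunk) accepts a prompt field:

import requests

BASE_URL = "http://localhost:7860"  # hypothetical address; the real Space URL is not shown in this diff

def stream_generate(prompt: str) -> None:
    # stream=True keeps the HTTP connection open; iter_content yields
    # chunks as the server flushes each token from the StreamingResponse.
    with requests.post(
        f"{BASE_URL}/generate/",
        json={"prompt": prompt},  # payload shape is an assumption
        stream=True,
        timeout=60,
    ) as resp:
        resp.raise_for_status()
        for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
            print(chunk, end="", flush=True)

if __name__ == "__main__":
    stream_generate("Explain streaming responses in one sentence.")

The payoff of the change is time-to-first-token: the caller starts printing output as soon as the first token is generated instead of waiting for the full completion to finish.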