mateoluksenberg committed
Update app.py

app.py CHANGED
@@ -11,7 +11,7 @@ import pymupdf
 import docx
 from pptx import Presentation
 
-from fastapi import FastAPI, File, UploadFile, HTTPException
+from fastapi import FastAPI, File, Form, UploadFile, HTTPException
 from fastapi.responses import HTMLResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.responses import StreamingResponse
@@ -210,7 +210,61 @@ EXAMPLES = [
 
 
 # Definir la función simple_chat
-@spaces.GPU()
+# @spaces.GPU()
+# def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
+#     try:
+#         model = AutoModelForCausalLM.from_pretrained(
+#             MODEL_ID,
+#             torch_dtype=torch.bfloat16,
+#             low_cpu_mem_usage=True,
+#             trust_remote_code=True
+#         )
+
+#         conversation = []
+
+#         if "file" in message and message["file"]:
+#             file_path = message["file"]
+#             choice, contents = mode_load(file_path)
+#             if choice == "image":
+#                 conversation.append({"role": "user", "image": contents, "content": message["text"]})
+#             elif choice == "doc":
+#                 format_msg = contents + "\n\n\n" + "{} files uploaded.\n" + message["text"]
+#                 conversation.append({"role": "user", "content": format_msg})
+#             else:
+#                 conversation.append({"role": "user", "content": message["text"]})
+
+#         input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
+
+#         generate_kwargs = dict(
+#             max_length=max_length,
+#             do_sample=True,
+#             top_p=top_p,
+#             top_k=top_k,
+#             temperature=temperature,
+#             repetition_penalty=penalty,
+#             eos_token_id=[151329, 151336, 151338],
+#         )
+
+#         with torch.no_grad():
+#             generated_ids = model.generate(input_ids['input_ids'], **generate_kwargs)
+#             generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+
+#         return PlainTextResponse(generated_text)
+#     except Exception as e:
+#         return PlainTextResponse(f"Error: {str(e)}")
+
+# @app.post("/chat/")
+# async def test_endpoint(message: dict):
+#     if "text" not in message:
+#         raise HTTPException(status_code=400, detail="Missing 'text' in request body")
+
+#     if "file" not in message:
+#         print("Sin File")
+
+#     response = simple_chat(message)
+#     return response
+
+
 def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
     try:
         model = AutoModelForCausalLM.from_pretrained(
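For reference, the block commented out above is the previous version of the endpoint, which read a JSON body and treated message["file"] as a server-side path. A call against that retired version would have looked roughly like this sketch (the URL and file path are placeholders, not values from the commit):

import requests

# Hypothetical call to the retired JSON-body endpoint.
resp = requests.post(
    "http://localhost:7860/chat/",  # placeholder URL for the Space
    json={"text": "Summarize this file", "file": "/tmp/report.pdf"},
)
print(resp.text)  # the old handler returned a PlainTextResponse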
@@ -223,12 +277,14 @@ def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096,
         conversation = []
 
         if "file" in message and message["file"]:
-            file_path = message["file"]
-            choice, contents = mode_load(file_path)
+            file = message["file"]
+            # Leer el archivo recibido
+            file_contents = io.BytesIO(file).read()
+            choice, contents = mode_load(file_contents)
             if choice == "image":
                 conversation.append({"role": "user", "image": contents, "content": message["text"]})
             elif choice == "doc":
-                format_msg = contents + "\n\n\n" + "{} files uploaded.\n" + message["text"]
+                format_msg = contents + "\n\n\n" + "{} files uploaded.\n".format(file.filename) + message["text"]
                 conversation.append({"role": "user", "content": format_msg})
             else:
                 conversation.append({"role": "user", "content": message["text"]})
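With this change, message["file"] is expected to carry the raw bytes of an upload rather than a filesystem path, and mode_load is assumed to accept those bytes. A minimal sketch of assembling that dict from the new endpoint's form fields (build_message is illustrative only, not part of the commit):

# Illustrative helper, not in the commit: pack the form fields into the
# dict shape that simple_chat consumes.
async def build_message(text: str, file=None) -> dict:
    message = {"text": text}
    if file is not None:
        # UploadFile.read() returns the raw upload bytes, matching the
        # io.BytesIO(file).read() handling in the branch above.
        message["file"] = await file.read()
    return message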
@@ -253,16 +309,25 @@ def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096,
     except Exception as e:
         return PlainTextResponse(f"Error: {str(e)}")
 
+
 @app.post("/chat/")
-async def test_endpoint(message: dict):
-    if "text" not in message:
+async def test_endpoint(
+    text: str = Form(...),
+    file: UploadFile = File(None)  # 'None' makes the file optional
+):
+    if not text:
         raise HTTPException(status_code=400, detail="Missing 'text' in request body")
 
-    if "file" not in message:
-        print("Sin File")
+    if file:
+        # Process the file if it is provided
+        file_contents = await file.read()
+        # Do something with file_contents if needed
+        print("File received")
+    else:
+        print("No file received")
 
-    response = simple_chat(message)
-    return response
+    response = simple_chat(text)
+    return {"response": response}
 
 
 with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
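After this commit, /chat/ accepts multipart form data (a required text field, an optional file) instead of a JSON body; note that as committed, test_endpoint forwards only the bare text string to simple_chat, which still expects a dict. A minimal client sketch against the new signature (URL and filename are placeholders):

import requests

URL = "http://localhost:7860/chat/"  # placeholder URL for the Space

# Text-only request: 'file' may be omitted because File(None) makes it optional.
resp = requests.post(URL, data={"text": "Hello"})
print(resp.json())  # the handler wraps the reply as {"response": ...}

# Request with an attached document.
with open("report.pdf", "rb") as f:
    resp = requests.post(URL, data={"text": "Summarize this"}, files={"file": f})
print(resp.json())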