Hjgugugjhuhjggg
committed on
Update app.py
app.py
CHANGED
@@ -1,15 +1,19 @@
 import os
 import json
+import uuid
 import requests
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 from google.cloud import storage
 from google.auth import exceptions
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 from io import BytesIO
 from dotenv import load_dotenv
 import uvicorn
 import tempfile
+from PIL import Image
+import soundfile as sf
+import torch
 
 load_dotenv()
 
@@ -49,6 +53,10 @@ class GCSHandler:
             raise HTTPException(status_code=404, detail=f"File '{blob_name}' not found.")
         return BytesIO(blob.download_as_bytes())
 
+    def generate_signed_url(self, blob_name, expiration=3600):
+        blob = self.bucket.blob(blob_name)
+        return blob.generate_signed_url(expiration=expiration)
+
 def download_model_from_huggingface(model_name):
     url = f"https://huggingface.co/{model_name}/tree/main"
     headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
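A side note on the new generate_signed_url helper above: in the google-cloud-storage library, the expiration argument accepts a datetime, a timedelta, or an integer, and an integer is treated as an absolute POSIX timestamp rather than a duration in seconds. If the intent of expiration=3600 is "valid for one hour", the timedelta form is the usual way to express that. A minimal sketch, not part of this commit:

from datetime import timedelta

from google.cloud import storage

def signed_url_for(bucket_name, blob_name, seconds=3600):
    # Hypothetical helper: sign a GCS blob URL that expires `seconds` from now.
    client = storage.Client()
    blob = client.bucket(bucket_name).blob(blob_name)
    # A timedelta is read as "now + duration"; a bare integer would be
    # interpreted as an absolute Unix timestamp instead.
    return blob.generate_signed_url(expiration=timedelta(seconds=seconds))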
@@ -109,16 +117,72 @@ async def predict(request: DownloadModelRequest):
             f.write(tokenizer_stream.read())
         with open(model_path, 'wb') as f:
             f.write(model_stream.read())
-
-        model = AutoModelForCausalLM.from_pretrained(tmp_dir)
+
+        model = AutoModelForCausalLM.from_pretrained(tmp_dir)
         tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
+
+        pipe = pipeline(request.pipeline_task, model=model, tokenizer=tokenizer)
+
+        if request.pipeline_task in ["text-generation", "translation", "summarization"]:
+            result = pipe(request.input_text)
+            return {"response": result[0]}
+
+        elif request.pipeline_task == "image-generation":
+            try:
+                images = pipe(request.input_text)
+                image = images[0]
+                image_filename = f"{uuid.uuid4().hex}.png"
+                image_path = os.path.join(tmp_dir, image_filename)
+                image.save(image_path)
+
+                gcs_handler.upload_file(f"images/{image_filename}", open(image_path, "rb"))
+                image_url = gcs_handler.generate_signed_url(f"images/{image_filename}")
+                return {"response": {"image_url": image_url}}
+            except Exception as e:
+                raise HTTPException(status_code=400, detail="Error generating the image.")
+
+        elif request.pipeline_task == "image-editing":
+            try:
+                edited_images = pipe(request.input_text)
+                edited_image = edited_images[0]
+                edited_image_filename = f"{uuid.uuid4().hex}_edited.png"
+                edited_image_path = os.path.join(tmp_dir, edited_image_filename)
+                edited_image.save(edited_image_path)
+
+                gcs_handler.upload_file(f"images/{edited_image_filename}", open(edited_image_path, "rb"))
+                edited_image_url = gcs_handler.generate_signed_url(f"images/{edited_image_filename}")
+                return {"response": {"edited_image_url": edited_image_url}}
+            except Exception as e:
+                raise HTTPException(status_code=400, detail="Error editing the image.")
+
+        elif request.pipeline_task == "image-to-image":
+            try:
+                transformed_images = pipe(request.input_text)
+                transformed_image = transformed_images[0]
+                transformed_image_filename = f"{uuid.uuid4().hex}_transformed.png"
+                transformed_image_path = os.path.join(tmp_dir, transformed_image_filename)
+                transformed_image.save(transformed_image_path)
+
+                gcs_handler.upload_file(f"images/{transformed_image_filename}", open(transformed_image_path, "rb"))
+                transformed_image_url = gcs_handler.generate_signed_url(f"images/{transformed_image_filename}")
+                return {"response": {"transformed_image_url": transformed_image_url}}
+            except Exception as e:
+                raise HTTPException(status_code=400, detail="Error transforming the image.")
+
+        elif request.pipeline_task == "text-to-3d":
+            try:
+                model_3d_filename = f"{uuid.uuid4().hex}.obj"
+                model_3d_path = os.path.join(tmp_dir, model_3d_filename)
+
+                with open(model_3d_path, "w") as f:
+                    f.write("Simulated 3D model data")
+
+                gcs_handler.upload_file(f"3d-models/{model_3d_filename}", open(model_3d_path, "rb"))
+                model_3d_url = gcs_handler.generate_signed_url(f"3d-models/{model_3d_filename}")
+                return {"response": {"model_3d_url": model_3d_url}}
+            except Exception as e:
+                raise HTTPException(status_code=400, detail="Error generating the 3D model.")
 
-        pipeline_ = pipeline(request.pipeline_task, model=model, tokenizer=tokenizer)
-
-        result = pipeline_(request.input_text)
-
-        return {"response": result}
-
     except HTTPException as e:
         raise e
     except Exception as e:
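For reference, a minimal sketch of how the reworked predict handler might be exercised once deployed. The route decorator and the full DownloadModelRequest schema sit outside this diff, so the /predict path and the model_name field are assumptions:

import requests

# Assumed route and field names; only pipeline_task and input_text are
# confirmed by this diff.
payload = {
    "model_name": "gpt2",                # assumed field: any Hugging Face model id
    "pipeline_task": "text-generation",  # or "image-generation", "text-to-3d", ...
    "input_text": "Hello, world",
}
resp = requests.post("http://localhost:8000/predict", json=payload)
print(resp.json())

Per the new branching, text tasks return the first pipeline result directly, while the image and 3D branches upload the generated artifact to GCS and respond with a signed URL.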