Update app.py
app.py CHANGED
@@ -108,11 +108,23 @@ class timer:
         print(f"{self.method} took {str(round(end - self.start, 2))}s")
 
 def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
-    [5 removed lines; content not shown in this diff view]
+    if image is None:
+        print("Error: Input image is None")
+        return "", None
+
+    try:
+        trial_id = str(uuid.uuid4())
+        processed_image = g.trellis_pipeline.preprocess_image(image)
+        if processed_image is not None:
+            processed_image.save(f"{TMP_DIR}/{trial_id}.png")
+            return trial_id, processed_image
+        else:
+            print("Error: Processed image is None")
+            return "", None
+    except Exception as e:
+        print(f"Error in image preprocessing: {str(e)}")
+        return "", None
+
 def pack_state(gs: Gaussian, mesh: MeshExtractResult, trial_id: str) -> dict:
     return {
         'gaussian': {
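As context for the hunk above: on success the new preprocess_image saves the processed image under a fresh UUID in TMP_DIR and returns (trial_id, image); on any failure it returns ("", None) instead of raising. Below is a minimal standalone sketch of that contract; TMP_DIR here is a hypothetical path and preprocess_stub stands in for g.trellis_pipeline.preprocess_image.

import os
import uuid
from typing import Optional, Tuple
from PIL import Image

TMP_DIR = "/tmp/preprocess_demo"  # hypothetical; the Space defines its own TMP_DIR

def preprocess_stub(image: Image.Image) -> Image.Image:
    # stand-in for g.trellis_pipeline.preprocess_image
    return image.convert("RGBA")

def preprocess_image_sketch(image: Optional[Image.Image]) -> Tuple[str, Optional[Image.Image]]:
    if image is None:
        return "", None
    try:
        os.makedirs(TMP_DIR, exist_ok=True)
        trial_id = str(uuid.uuid4())
        processed = preprocess_stub(image)
        processed.save(f"{TMP_DIR}/{trial_id}.png")
        return trial_id, processed
    except Exception as e:
        print(f"Error in image preprocessing: {e}")
        return "", None

print(preprocess_image_sketch(Image.new("RGB", (64, 64), "white"))[0] != "")  # True
print(preprocess_image_sketch(None))  # ('', None)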
@@ -201,20 +213,24 @@ def deactivate_button() -> gr.Button:
 
 @spaces.GPU
 def text_to_image(prompt: str, height: int, width: int, steps: int, scales: float, seed: int) -> Image.Image:
-    [14 removed lines; content not shown in this diff view]
+    try:
+        # Clear CUDA memory before generating
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+
+        # Detect Korean (Hangul) text so it can be translated
+        def contains_korean(text):
+            return any(ord('가') <= ord(c) <= ord('힣') for c in text)
+
+        # Prompt preprocessing: translate Korean prompts to English
+        if contains_korean(prompt):
+            translated = g.translator(prompt)[0]['translation_text']
+            prompt = translated
+
+        # Enforce the expected prompt format
+        formatted_prompt = f"wbgmsst, 3D, {prompt}, white background"
+
+        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
             generated_image = g.flux_pipe(
                 prompt=[formatted_prompt],
                 generator=torch.Generator().manual_seed(int(seed)),
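The Hangul check added in this hunk relies on precomposed Hangul syllables occupying the contiguous Unicode block U+AC00 ('가') through U+D7A3 ('힣'). A small standalone sketch of the detection and prompt-formatting steps follows; the translate argument is a no-op stand-in for the g.translator pipeline used in the Space.

def contains_korean(text: str) -> bool:
    # True if any character falls in the Hangul syllables block U+AC00..U+D7A3
    return any(ord('가') <= ord(c) <= ord('힣') for c in text)

def format_prompt(prompt: str, translate=lambda s: s) -> str:
    # translate stands in for g.translator(prompt)[0]['translation_text']
    if contains_korean(prompt):
        prompt = translate(prompt)
    return f"wbgmsst, 3D, {prompt}, white background"

print(contains_korean("a red sports car"))  # False
print(contains_korean("빨간 스포츠카"))       # True
print(format_prompt("a red sports car"))    # wbgmsst, 3D, a red sports car, white background

Note the check matches only precomposed syllables, not isolated jamo, which mirrors the range used in the diff.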
@@ -225,13 +241,17 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: float, seed: int) -> Image.Image:
                 max_sequence_length=256
             ).images[0]
 
-    [7 removed lines; content not shown in this diff view]
+        if generated_image is not None:
+            trial_id = str(uuid.uuid4())
+            generated_image.save(f"{TMP_DIR}/{trial_id}.png")
+            return generated_image
+        else:
+            print("Error: Generated image is None")
+            return None
+
+    except Exception as e:
+        print(f"Error in image generation: {str(e)}")
+        return None
 
 # Gradio Interface
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
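For reference, the generation call in the two hunks above sits behind two guards: the CUDA allocator cache is emptied before the pipeline runs, and the call executes under torch.inference_mode() with bfloat16 autocast. A minimal sketch of that pattern around an arbitrary callable; generate_fn is a hypothetical stand-in for the g.flux_pipe call.

import torch

def run_guarded(generate_fn, *args, **kwargs):
    # Release cached allocator blocks so a large generation is less likely to OOM
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    # Skip autograd bookkeeping and let CUDA ops run in bfloat16 where supported
    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
        return generate_fn(*args, **kwargs)

print(run_guarded(lambda: torch.ones(2, 2).sum()))  # tensor(4.)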