aiqtech committed on
Commit
0758696
·
verified ·
1 Parent(s): 150c6e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -26
app.py CHANGED
@@ -108,11 +108,23 @@ class timer:
108
  print(f"{self.method} took {str(round(end - self.start, 2))}s")
109
 
110
  def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
111
- trial_id = str(uuid.uuid4())
112
- processed_image = g.trellis_pipeline.preprocess_image(image)
113
- processed_image.save(f"{TMP_DIR}/{trial_id}.png")
114
- return trial_id, processed_image
115
-
 
 
 
 
 
 
 
 
 
 
 
 
116
  def pack_state(gs: Gaussian, mesh: MeshExtractResult, trial_id: str) -> dict:
117
  return {
118
  'gaussian': {
@@ -201,20 +213,24 @@ def deactivate_button() -> gr.Button:
201
 
202
  @spaces.GPU
203
  def text_to_image(prompt: str, height: int, width: int, steps: int, scales: float, seed: int) -> Image.Image:
204
- # ํ•œ๊ธ€ ๊ฐ์ง€ ๋ฐ ๋ฒˆ์—ญ
205
- def contains_korean(text):
206
- return any(ord('가') <= ord(c) <= ord('힣') for c in text)
207
-
208
- # 프롬프트 전처리
209
- if contains_korean(prompt):
210
- translated = g.translator(prompt)[0]['translation_text']
211
- prompt = translated
212
-
213
- # ํ”„๋กฌํ”„ํŠธ ํ˜•์‹ ๊ฐ•์ œ
214
- formatted_prompt = f"wbgmsst, 3D, {prompt}, white background"
215
-
216
- with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
217
- try:
 
 
 
 
218
  generated_image = g.flux_pipe(
219
  prompt=[formatted_prompt],
220
  generator=torch.Generator().manual_seed(int(seed)),
@@ -225,13 +241,17 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
225
  max_sequence_length=256
226
  ).images[0]
227
 
228
- trial_id = str(uuid.uuid4())
229
- generated_image.save(f"{TMP_DIR}/{trial_id}.png")
230
- return generated_image
231
-
232
- except Exception as e:
233
- print(f"Error in image generation: {str(e)}")
234
- return None
 
 
 
 
235
 
236
  # Gradio Interface
237
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
108
  print(f"{self.method} took {str(round(end - self.start, 2))}s")
109
 
110
  def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
111
+ if image is None:
112
+ print("Error: Input image is None")
113
+ return "", None
114
+
115
+ try:
116
+ trial_id = str(uuid.uuid4())
117
+ processed_image = g.trellis_pipeline.preprocess_image(image)
118
+ if processed_image is not None:
119
+ processed_image.save(f"{TMP_DIR}/{trial_id}.png")
120
+ return trial_id, processed_image
121
+ else:
122
+ print("Error: Processed image is None")
123
+ return "", None
124
+ except Exception as e:
125
+ print(f"Error in image preprocessing: {str(e)}")
126
+ return "", None
127
+
128
  def pack_state(gs: Gaussian, mesh: MeshExtractResult, trial_id: str) -> dict:
129
  return {
130
  'gaussian': {
 
213
 
214
  @spaces.GPU
215
  def text_to_image(prompt: str, height: int, width: int, steps: int, scales: float, seed: int) -> Image.Image:
216
+ try:
217
+ # CUDA 메모리 정리
218
+ if torch.cuda.is_available():
219
+ torch.cuda.empty_cache()
220
+
221
+ # ํ•œ๊ธ€ ๊ฐ์ง€ ๋ฐ ๋ฒˆ์—ญ
222
+ def contains_korean(text):
223
+ return any(ord('가') <= ord(c) <= ord('힣') for c in text)
224
+
225
+ # 프롬프트 전처리
226
+ if contains_korean(prompt):
227
+ translated = g.translator(prompt)[0]['translation_text']
228
+ prompt = translated
229
+
230
+ # ํ”„๋กฌํ”„ํŠธ ํ˜•์‹ ๊ฐ•์ œ
231
+ formatted_prompt = f"wbgmsst, 3D, {prompt}, white background"
232
+
233
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
234
  generated_image = g.flux_pipe(
235
  prompt=[formatted_prompt],
236
  generator=torch.Generator().manual_seed(int(seed)),
 
241
  max_sequence_length=256
242
  ).images[0]
243
 
244
+ if generated_image is not None:
245
+ trial_id = str(uuid.uuid4())
246
+ generated_image.save(f"{TMP_DIR}/{trial_id}.png")
247
+ return generated_image
248
+ else:
249
+ print("Error: Generated image is None")
250
+ return None
251
+
252
+ except Exception as e:
253
+ print(f"Error in image generation: {str(e)}")
254
+ return None
255
 
256
  # Gradio Interface
257
  with gr.Blocks(theme=gr.themes.Soft()) as demo: