aiqtech committed on
Commit
c201c52
·
verified ·
1 Parent(s): d88c997

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -52
app.py CHANGED
@@ -208,7 +208,11 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
208
  return None, None
209
 
210
  try:
211
- clear_gpu_memory()
 
 
 
 
212
 
213
  if randomize_seed:
214
  seed = np.random.randint(0, MAX_SEED)
@@ -231,58 +235,77 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
231
  image = image.resize(new_size, Image.LANCZOS)
232
  print(f"Resized image to: {image.size}")
233
 
234
- with spaces.GPU(), torch.inference_mode():
235
- # 3D ์ƒ์„ฑ
236
- g.trellis_pipeline.to('cuda')
237
- outputs = g.trellis_pipeline.run(
238
- image,
239
- seed=seed,
240
- formats=["gaussian", "mesh"],
241
- preprocess_image=False,
242
- sparse_structure_sampler_params={
243
- "steps": min(ss_sampling_steps, 12),
244
- "cfg_strength": ss_guidance_strength,
245
- },
246
- slat_sampler_params={
247
- "steps": min(slat_sampling_steps, 12),
248
- "cfg_strength": slat_guidance_strength,
249
- },
250
- )
251
-
252
- # ๋น„๋””์˜ค ๋ Œ๋”๋ง
253
- video = render_utils.render_video(
254
- outputs['gaussian'][0],
255
- num_frames=60,
256
- resolution=512
257
- )['color']
258
-
259
- video_geo = render_utils.render_video(
260
- outputs['mesh'][0],
261
- num_frames=60,
262
- resolution=512
263
- )['normal']
264
-
265
- # CPU๋กœ ๋ฐ์ดํ„ฐ ์ด๋™
266
- video = [v.cpu().numpy() if torch.is_tensor(v) else v for v in video]
267
- video_geo = [v.cpu().numpy() if torch.is_tensor(v) else v for v in video_geo]
268
-
269
- video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
270
- new_trial_id = str(uuid.uuid4())
271
- video_path = f"{TMP_DIR}/{new_trial_id}.mp4"
272
- os.makedirs(os.path.dirname(video_path), exist_ok=True)
273
- imageio.mimsave(video_path, video, fps=15)
274
-
275
- state = pack_state(outputs['gaussian'][0], outputs['mesh'][0], new_trial_id)
276
-
277
- return state, video_path
278
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
279
  except Exception as e:
280
  print(f"Error in image_to_3d: {str(e)}")
281
- return None, None
282
- finally:
283
  if hasattr(g.trellis_pipeline, 'to'):
284
  g.trellis_pipeline.to('cpu')
285
- clear_gpu_memory()
 
 
 
 
286
 
287
  def clear_gpu_memory():
288
  """GPU ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์ •๋ฆฌํ•˜๋Š” ์œ ํ‹ธ๋ฆฌํ‹ฐ ํ•จ์ˆ˜"""
@@ -326,7 +349,11 @@ def deactivate_button() -> gr.Button:
326
  @spaces.GPU
327
  def text_to_image(prompt: str, height: int, width: int, steps: int, scales: float, seed: int) -> Image.Image:
328
  try:
329
- clear_gpu_memory()
 
 
 
 
330
 
331
  # ํ•œ๊ธ€ ๊ฐ์ง€ ๋ฐ ๋ฒˆ์—ญ
332
  def contains_korean(text):
@@ -343,7 +370,7 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
343
  width = min(width, 512)
344
  steps = min(steps, 12)
345
 
346
- with spaces.GPU(), torch.inference_mode():
347
  generated_image = g.flux_pipe(
348
  prompt=[formatted_prompt],
349
  generator=torch.Generator('cuda').manual_seed(int(seed)),
@@ -368,7 +395,10 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
368
  print(f"Error in image generation: {str(e)}")
369
  return None
370
  finally:
371
- clear_gpu_memory()
 
 
 
372
 
373
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
374
  gr.Markdown("""## Craft3D""")
 
208
  return None, None
209
 
210
  try:
211
+ # CUDA ๋ฉ”๋ชจ๋ฆฌ ์ดˆ๊ธฐํ™”
212
+ if torch.cuda.is_available():
213
+ torch.cuda.empty_cache()
214
+ torch.cuda.synchronize()
215
+ gc.collect()
216
 
217
  if randomize_seed:
218
  seed = np.random.randint(0, MAX_SEED)
 
235
  image = image.resize(new_size, Image.LANCZOS)
236
  print(f"Resized image to: {image.size}")
237
 
238
+ # GPU ์ž‘์—… ์‹œ์ž‘
239
+ with torch.inference_mode():
240
+ try:
241
+ # ๋ชจ๋ธ์„ GPU๋กœ ์ด๋™
242
+ g.trellis_pipeline.to('cuda')
243
+ torch.cuda.synchronize()
244
+
245
+ # 3D ์ƒ์„ฑ
246
+ outputs = g.trellis_pipeline.run(
247
+ image,
248
+ seed=seed,
249
+ formats=["gaussian", "mesh"],
250
+ preprocess_image=False,
251
+ sparse_structure_sampler_params={
252
+ "steps": min(ss_sampling_steps, 12),
253
+ "cfg_strength": ss_guidance_strength,
254
+ },
255
+ slat_sampler_params={
256
+ "steps": min(slat_sampling_steps, 12),
257
+ "cfg_strength": slat_guidance_strength,
258
+ },
259
+ )
260
+ torch.cuda.synchronize()
261
+
262
+ # ๋น„๋””์˜ค ๋ Œ๋”๋ง
263
+ video = render_utils.render_video(
264
+ outputs['gaussian'][0],
265
+ num_frames=60,
266
+ resolution=512
267
+ )['color']
268
+ torch.cuda.synchronize()
269
+
270
+ video_geo = render_utils.render_video(
271
+ outputs['mesh'][0],
272
+ num_frames=60,
273
+ resolution=512
274
+ )['normal']
275
+ torch.cuda.synchronize()
276
+
277
+ # CPU๋กœ ๋ฐ์ดํ„ฐ ์ด๋™
278
+ video = [v.cpu().numpy() if torch.is_tensor(v) else v for v in video]
279
+ video_geo = [v.cpu().numpy() if torch.is_tensor(v) else v for v in video_geo]
280
+
281
+ video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
282
+ new_trial_id = str(uuid.uuid4())
283
+ video_path = f"{TMP_DIR}/{new_trial_id}.mp4"
284
+ os.makedirs(os.path.dirname(video_path), exist_ok=True)
285
+ imageio.mimsave(video_path, video, fps=15)
286
+
287
+ # ์ƒํƒœ ์ €์žฅ
288
+ state = pack_state(outputs['gaussian'][0], outputs['mesh'][0], new_trial_id)
289
+
290
+ return state, video_path
291
+
292
+ finally:
293
+ # ์ •๋ฆฌ ์ž‘์—…
294
+ g.trellis_pipeline.to('cpu')
295
+ if torch.cuda.is_available():
296
+ torch.cuda.empty_cache()
297
+ torch.cuda.synchronize()
298
+ gc.collect()
299
+
300
  except Exception as e:
301
  print(f"Error in image_to_3d: {str(e)}")
 
 
302
  if hasattr(g.trellis_pipeline, 'to'):
303
  g.trellis_pipeline.to('cpu')
304
+ if torch.cuda.is_available():
305
+ torch.cuda.empty_cache()
306
+ torch.cuda.synchronize()
307
+ gc.collect()
308
+ return None, None
309
 
310
  def clear_gpu_memory():
311
  """GPU ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์ •๋ฆฌํ•˜๋Š” ์œ ํ‹ธ๋ฆฌํ‹ฐ ํ•จ์ˆ˜"""
 
349
  @spaces.GPU
350
  def text_to_image(prompt: str, height: int, width: int, steps: int, scales: float, seed: int) -> Image.Image:
351
  try:
352
+ # CUDA ๋ฉ”๋ชจ๋ฆฌ ์ดˆ๊ธฐํ™”
353
+ if torch.cuda.is_available():
354
+ torch.cuda.empty_cache()
355
+ torch.cuda.synchronize()
356
+ gc.collect()
357
 
358
  # ํ•œ๊ธ€ ๊ฐ์ง€ ๋ฐ ๋ฒˆ์—ญ
359
  def contains_korean(text):
 
370
  width = min(width, 512)
371
  steps = min(steps, 12)
372
 
373
+ with torch.inference_mode():
374
  generated_image = g.flux_pipe(
375
  prompt=[formatted_prompt],
376
  generator=torch.Generator('cuda').manual_seed(int(seed)),
 
395
  print(f"Error in image generation: {str(e)}")
396
  return None
397
  finally:
398
+ if torch.cuda.is_available():
399
+ torch.cuda.empty_cache()
400
+ torch.cuda.synchronize()
401
+ gc.collect()
402
 
403
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
404
  gr.Markdown("""## Craft3D""")