Spaces: Running on L40S
Update app.py
Browse files

app.py CHANGED
@@ -49,8 +49,11 @@ def initialize_models():
         device="cpu"
     )
 
-    # Flux
-    flux_pipe =
+    # Initialize the Flux pipeline (eager load)
+    flux_pipe = FluxPipeline.from_pretrained(
+        "black-forest-labs/FLUX.1-dev",
+        torch_dtype=torch.float32
+    )
 
     print("Models initialized successfully")
     return True
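For context, a minimal standalone sketch of the eager-loading pattern this hunk introduces, using the diffusers FluxPipeline API; the model id and float32 dtype are taken from the diff, everything else is illustrative:

import torch
from diffusers import FluxPipeline

# Load the full pipeline once at startup and keep it resident on CPU in
# float32. Eager loading trades startup time and host RAM for predictable
# first-request latency; the request handler moves it to the GPU per call.
flux_pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.float32,  # CPU-safe dtype; cast to fp16 on GPU later
)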
@@ -74,6 +77,10 @@ def free_memory():
     import gc
     gc.collect()
 
+    if torch.cuda.is_available():
+        with torch.cuda.device('cuda'):
+            torch.cuda.empty_cache()
+
     # Clean up temporary files
     for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home', '/tmp/huggingface', '/tmp/cache']:
         if os.path.exists(dir_path):
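A self-contained sketch of what free_memory looks like after this hunk: gc.collect() runs first so unreferenced tensors become eligible for the allocator flush, then torch.cuda.empty_cache() returns cached blocks to the driver. The diff truncates the loop body, so the shutil.rmtree call below is an assumption about how the directories are removed:

import gc
import os
import shutil
import torch

def free_memory():
    # Drop unreferenced Python objects first so their CUDA storage can
    # actually be released by the cache flush below.
    gc.collect()

    if torch.cuda.is_available():
        # Return cached, unused allocator blocks to the driver. This does
        # not free tensors that are still referenced somewhere.
        with torch.cuda.device('cuda'):
            torch.cuda.empty_cache()

    # Clean up temporary files
    for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home',
                     '/tmp/huggingface', '/tmp/cache']:
        if os.path.exists(dir_path):
            shutil.rmtree(dir_path, ignore_errors=True)  # assumed cleanup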
@@ -223,38 +230,43 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
 @spaces.GPU
 def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
     try:
-
-        flux_pipe = load_flux_pipe()
+        global flux_pipe
 
         if torch.cuda.is_available():
-            flux_pipe.to("cuda")
+            flux_pipe = flux_pipe.to("cuda")
+            flux_pipe = flux_pipe.to(torch.float16)
 
         # Limit image size
         height = min(height, 512)
         width = min(width, 512)
 
+        # Prompt handling
         base_prompt = "wbgmsst, 3D, white background"
         translated_prompt = translate_if_korean(prompt)
         final_prompt = f"{translated_prompt}, {base_prompt}"
 
+        print(f"Generating image with prompt: {final_prompt}")
+
         with torch.inference_mode():
-            image = flux_pipe(
+            output = flux_pipe(
                 prompt=[final_prompt],
                 height=height,
                 width=width,
                 guidance_scale=min(guidance_scale, 10.0),
                 num_inference_steps=min(num_steps, 30)
-        )
+            )
+
+        image = output.images[0]
 
         if torch.cuda.is_available():
-            flux_pipe.to("cpu")
+            flux_pipe = flux_pipe.to("cpu")
 
         return image
 
     except Exception as e:
         print(f"Error in generate_image_from_text: {str(e)}")
-        if torch.cuda.is_available()
-            flux_pipe.to("cpu")
+        if torch.cuda.is_available():
+            flux_pipe = flux_pipe.to("cpu")
         raise e
 
 @spaces.GPU
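This hunk moves the shared pipeline to the GPU for the duration of one request, casts it to fp16 there, and moves it back to CPU afterwards, with the CPU move duplicated in the except path. A hedged sketch of the same shuttle written with try/finally, which guarantees the move-back on both the success and error paths (names follow the diff; the prompt-assembly helpers are omitted for brevity):

import torch

def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
    global flux_pipe  # shared, CPU-resident pipeline from initialize_models()
    try:
        if torch.cuda.is_available():
            # Move to the GPU and cast to half precision for this request.
            flux_pipe = flux_pipe.to("cuda", torch.float16)

        with torch.inference_mode():
            output = flux_pipe(
                prompt=[prompt],
                height=min(height, 512),
                width=min(width, 512),
                guidance_scale=min(guidance_scale, 10.0),
                num_inference_steps=min(num_steps, 30),
            )
        return output.images[0]
    finally:
        if torch.cuda.is_available():
            # Runs on success and on error alike, so the pipeline never
            # stays resident on the GPU between requests.
            flux_pipe = flux_pipe.to("cpu")

With concurrency_limit=1 on the click handler (see the last hunk), only one request holds the GPU copy at a time, which is what makes mutating a global pipeline like this safe.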
@@ -389,14 +401,15 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
         activate_button,
         outputs=[download_glb]
     )
-
     generate_txt2img_btn.click(
         generate_image_from_text,
         inputs=[text_prompt, txt2img_height, txt2img_width, guidance_scale, num_steps],
         outputs=[txt2img_output],
-        concurrency_limit=1
+        concurrency_limit=1,
+        show_progress=True  # show progress indicator
     )
 
+
 if __name__ == "__main__":
     # Clean up memory
     free_memory()
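Finally, a minimal Gradio sketch of the event wiring this hunk touches. concurrency_limit=1 serializes requests to the GPU-bound handler; the component definitions and their ranges are illustrative, and show_progress=True follows the diff (newer Gradio releases prefer the string values 'full', 'minimal', or 'hidden'):

import gradio as gr

def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
    # Stand-in for the GPU-bound generator sketched above; returns None so
    # the wiring can be exercised without loading a model.
    return None

with gr.Blocks() as demo:
    text_prompt = gr.Textbox(label="Prompt")
    txt2img_height = gr.Slider(256, 512, value=512, step=64, label="Height")
    txt2img_width = gr.Slider(256, 512, value=512, step=64, label="Width")
    guidance_scale = gr.Slider(1.0, 10.0, value=3.5, label="Guidance scale")
    num_steps = gr.Slider(1, 30, value=20, step=1, label="Steps")
    txt2img_output = gr.Image(label="Generated image")
    generate_txt2img_btn = gr.Button("Generate")

    generate_txt2img_btn.click(
        generate_image_from_text,
        inputs=[text_prompt, txt2img_height, txt2img_width, guidance_scale, num_steps],
        outputs=[txt2img_output],
        concurrency_limit=1,  # one GPU job at a time
        show_progress=True,   # progress indicator while the job runs
    )

if __name__ == "__main__":
    demo.launch()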