Spaces: Running on L40S
Update app.py
app.py CHANGED
@@ -18,11 +18,10 @@ from typing import *
 
 # Memory-related environment variables
 os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'
-os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
-os.environ['CUDA_VISIBLE_DEVICES'] = '0'
-os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
 os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
+os.environ['TORCH_HOME'] = '/tmp/torch_home'
 os.environ['HF_HOME'] = '/tmp/huggingface'
+os.environ['XDG_CACHE_HOME'] = '/tmp/cache'
 
 # Environment variable settings
 os.environ['SPCONV_ALGO'] = 'native'
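One caveat with the cache variables above (not stated in the commit, but worth keeping in mind): huggingface_hub, transformers, and torch resolve TRANSFORMERS_CACHE, HF_HOME, TORCH_HOME, and XDG_CACHE_HOME when they are first imported or first touch their caches, so the os.environ assignments only take effect if they run before those imports. A minimal sketch of the required ordering:

```python
# Minimal sketch of the import ordering these cache variables rely on.
# The paths are the ones set in this commit; the key point is that the
# environment must be populated before torch/transformers are imported.
import os

os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
os.environ['TORCH_HOME'] = '/tmp/torch_home'
os.environ['HF_HOME'] = '/tmp/huggingface'
os.environ['XDG_CACHE_HOME'] = '/tmp/cache'

import torch                          # reads TORCH_HOME on first cache use
from transformers import pipeline     # reads TRANSFORMERS_CACHE / HF_HOME
```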
@@ -36,23 +35,26 @@ def initialize_models():
     global pipeline, translator, flux_pipe
 
     try:
-        # Initialize the Trellis pipeline
+        # Create the cache directories
+        for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home', '/tmp/huggingface', '/tmp/cache']:
+            os.makedirs(dir_path, exist_ok=True)
+
+        # Initialize the Trellis pipeline (memory-optimized)
         pipeline = TrellisImageTo3DPipeline.from_pretrained(
-            "JeffreyXiang/TRELLIS-image-large"
+            "JeffreyXiang/TRELLIS-image-large",
+            resume_download=True,
+            local_files_only=False
         )
 
-        # Initialize the translator
+        # Initialize the translator (use a smaller model)
         translator = translation_pipeline(
             "translation",
             model="Helsinki-NLP/opus-mt-ko-en",
             device="cpu"
         )
 
-        # Initialize the Flux pipeline
-        flux_pipe = FluxPipeline.from_pretrained(
-            "black-forest-labs/FLUX.1-dev",
-            torch_dtype=torch.float32  # CPU mode
-        )
+        # Initialize the Flux pipeline (memory-optimized)
+        flux_pipe = None  # load on demand
 
         print("Models initialized successfully")
         return True
@@ -61,10 +63,31 @@ def initialize_models():
         print(f"Model initialization error: {str(e)}")
         return False
 
+def load_flux_pipe():
+    """Load the Flux pipeline only when it is needed"""
+    global flux_pipe
+    if flux_pipe is None:
+        flux_pipe = FluxPipeline.from_pretrained(
+            "black-forest-labs/FLUX.1-dev",
+            torch_dtype=torch.float32
+        )
+    return flux_pipe
+
 def free_memory():
     """Utility function that frees memory"""
     import gc
     gc.collect()
+
+    # Clean up temporary files
+    for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home', '/tmp/huggingface', '/tmp/cache']:
+        if os.path.exists(dir_path):
+            for file in os.listdir(dir_path):
+                file_path = os.path.join(dir_path, file)
+                try:
+                    if os.path.isfile(file_path):
+                        os.unlink(file_path)
+                except Exception as e:
+                    print(f'Error deleting {file_path}: {e}')
 
 @spaces.GPU
 def setup_gpu_model(model):
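The load_flux_pipe helper added above is a lazy singleton: the FLUX.1-dev weights are not touched until the first text-to-image request, and every later call reuses the same instance. A standalone sketch of the pattern, with a hypothetical make_pipeline standing in for the expensive FluxPipeline constructor:

```python
# Standalone sketch of the lazy-singleton pattern behind load_flux_pipe.
# make_pipeline is a hypothetical stand-in for the expensive constructor.
from typing import Optional

_pipe: Optional[object] = None

def make_pipeline() -> object:
    print("loading heavy weights...")   # runs at most once per process
    return object()

def get_pipe() -> object:
    """Create the pipeline on first use, then return the cached instance."""
    global _pipe
    if _pipe is None:
        _pipe = make_pipeline()
    return _pipe
```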
@@ -73,29 +96,6 @@ def setup_gpu_model(model):
     model = model.to("cuda")
     return model
 
-if __name__ == "__main__":
-    # Free CPU memory only
-    free_memory()
-
-    # Initialize the models
-    if not initialize_models():
-        print("Failed to initialize models")
-        exit(1)
-
-    try:
-        # Try to preload rembg (with a small image)
-        test_image = Image.fromarray(np.ones((64, 64, 3), dtype=np.uint8) * 255)
-        pipeline.preprocess_image(test_image)
-    except Exception as e:
-        print(f"Warning: Failed to preload rembg: {str(e)}")
-
-    # Launch the Gradio app
-    demo.queue(max_size=5).launch(
-        share=True,
-        max_threads=2,
-        show_error=True,
-        cache_examples=False
-    )
 
 def translate_if_korean(text):
     if any(ord('가') <= ord(char) <= ord('힣') for char in text):
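For reference, the range check in translate_if_korean covers the precomposed Hangul syllables block, U+AC00 ('가') through U+D7A3 ('힣'); any prompt containing a character in that range is routed through the ko-en translator. A self-contained sketch of the same test:

```python
# Sketch of the Hangul detection used by translate_if_korean: precomposed
# Korean syllables occupy the contiguous Unicode block U+AC00..U+D7A3.
def contains_korean(text: str) -> bool:
    return any('가' <= ch <= '힣' for ch in text)

assert contains_korean("모자를 쓴 고양이")     # Korean -> translate first
assert not contains_korean("a cat in a hat")  # English -> use as-is
```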
@@ -225,18 +225,18 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
 @spaces.GPU
 def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
     try:
-        #
+        # Load the Flux pipeline on demand
+        flux_pipe = load_flux_pipe()
+
         if torch.cuda.is_available():
             flux_pipe.to("cuda")
-            flux_pipe.to(torch.float16)
 
-        #
-        base_prompt = "wbgmsst, 3D, white background"
+        # Limit the image size
+        height = min(height, 512)
+        width = min(width, 512)
 
-
+        base_prompt = "wbgmsst, 3D, white background"
         translated_prompt = translate_if_korean(prompt)
-
-        # Compose the final prompt
         final_prompt = f"{translated_prompt}, {base_prompt}"
 
         with torch.inference_mode():
@@ -244,18 +244,19 @@ def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
             prompt=[final_prompt],
             height=height,
             width=width,
-            guidance_scale=guidance_scale,
-            num_inference_steps=num_steps
+            guidance_scale=min(guidance_scale, 10.0),
+            num_inference_steps=min(num_steps, 30)
         ).images[0]
 
-
-
+        if torch.cuda.is_available():
+            flux_pipe.to("cpu")
 
         return image
 
     except Exception as e:
         print(f"Error in generate_image_from_text: {str(e)}")
-
+        if torch.cuda.is_available() and flux_pipe is not None:
+            flux_pipe.to("cpu")
         raise e
 
 @spaces.GPU
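Moving flux_pipe back to the CPU after generation, and again in the except branch, is an offload-after-use pattern that keeps VRAM free between requests. A minimal sketch of the same idea with a try/finally covering both paths at once, assuming an arbitrary torch module in place of the pipeline:

```python
# Minimal sketch of offload-after-use, assuming an arbitrary nn.Module
# in place of the Flux pipeline; try/finally covers success and error paths.
import torch

def run_then_offload(model: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
    if torch.cuda.is_available():
        model.to("cuda")
        x = x.to("cuda")
    try:
        with torch.inference_mode():
            return model(x).cpu()
    finally:
        if torch.cuda.is_available():
            model.to("cpu")            # release the weights' VRAM
            torch.cuda.empty_cache()   # hand cached blocks back to the driver
```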
@@ -395,7 +396,7 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
     )
 
 if __name__ == "__main__":
-    #
+    # Free memory
     free_memory()
 
     # Initialize the models
@@ -404,16 +405,16 @@ if __name__ == "__main__":
         exit(1)
 
     try:
-        # Try to preload rembg (with a small image)
-        test_image = Image.fromarray(np.ones((64, 64, 3), dtype=np.uint8) * 255)
+        # Try to preload rembg (with a very small image)
+        test_image = Image.fromarray(np.ones((32, 32, 3), dtype=np.uint8) * 255)
         pipeline.preprocess_image(test_image)
     except Exception as e:
         print(f"Warning: Failed to preload rembg: {str(e)}")
 
     # Launch the Gradio app
-    demo.queue(max_size=5).launch(
+    demo.queue(max_size=3).launch(
         share=True,
-        max_threads=2,
+        max_threads=1,
         show_error=True,
         cache_examples=False,
         enable_queue=True
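Note that the final launch call is version-sensitive: enable_queue was a Gradio 3.x launch() flag that later releases dropped (queuing is implied by .queue()), and cache_examples is normally a gr.Interface/gr.Examples argument rather than a launch() one, so recent Gradio versions may reject both. A hedged equivalent for current releases:

```python
# Hedged sketch: the same launch under recent Gradio releases, where
# .queue() already enables queuing and the two removed kwargs are dropped.
demo.queue(max_size=3).launch(
    share=True,
    max_threads=1,
    show_error=True,
)
```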