Update app.py
app.py
CHANGED
@@ -16,20 +16,18 @@ from diffusers import FluxPipeline
 from typing import *
 
 
+MAX_SEED = np.iinfo(np.int32).max
+TMP_DIR = "/tmp/Trellis-demo"
+os.makedirs(TMP_DIR, exist_ok=True)
+
 # Memory-related environment variables
 os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'
 os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
 os.environ['TORCH_HOME'] = '/tmp/torch_home'
 os.environ['HF_HOME'] = '/tmp/huggingface'
 os.environ['XDG_CACHE_HOME'] = '/tmp/cache'
-
-# Environment variable settings
 os.environ['SPCONV_ALGO'] = 'native'
 os.environ['WARP_USE_CPU'] = '1'
-
-MAX_SEED = np.iinfo(np.int32).max
-TMP_DIR = "/tmp/Trellis-demo"
-os.makedirs(TMP_DIR, exist_ok=True)
 
 def initialize_models():
     global pipeline, translator, flux_pipe
@@ -39,22 +37,20 @@ def initialize_models():
     for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home', '/tmp/huggingface', '/tmp/cache']:
         os.makedirs(dir_path, exist_ok=True)
 
     # Initialize the Trellis pipeline
     pipeline = TrellisImageTo3DPipeline.from_pretrained(
-        "JeffreyXiang/TRELLIS-image-large"
-        resume_download=True,
-        local_files_only=False
+        "JeffreyXiang/TRELLIS-image-large"
     )
 
     # Initialize the translator
     translator = translation_pipeline(
         "translation",
         model="Helsinki-NLP/opus-mt-ko-en",
         device="cpu"
     )
 
-    # Flux
+    # Load the Flux pipeline only when needed
     flux_pipe = None
 
     print("Models initialized successfully")
     return True
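This hunk keeps flux_pipe as None at startup and leaves the actual loading to the load_flux_pipe() helper that the later generate_image_from_text hunk calls; the helper's body is not part of this diff. A minimal sketch of that lazy-load pattern, assuming the helper caches the pipeline in a module-level global and that the model id and dtype shown here are placeholders:

import torch
from diffusers import FluxPipeline

flux_pipe = None  # left as None by initialize_models(); filled on first use

def load_flux_pipe():
    """Hypothetical lazy loader: build the Flux pipeline once, then reuse it."""
    global flux_pipe
    if flux_pipe is None:
        # Assumed model id and dtype; neither is visible in this diff.
        flux_pipe = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-schnell",
            torch_dtype=torch.bfloat16,
        )
        if torch.cuda.is_available():
            flux_pipe = flux_pipe.to("cuda")
    return flux_pipe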
@@ -89,6 +85,7 @@ def free_memory():
         except Exception as e:
             print(f'Error deleting {file_path}: {e}')
 
+
 @spaces.GPU
 def setup_gpu_model(model):
     """Function that handles a model requiring GPU setup"""
@@ -171,7 +168,7 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
 
     input_image = Image.open(f"{TMP_DIR}/{trial_id}.png")
 
-    #
+    # Limit the input image size
     max_size = 512
     if max(input_image.size) > max_size:
         ratio = max_size / max(input_image.size)
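The hunk above adds the 512-pixel limit but ends right after the ratio computation, before the actual resize call. A short sketch of what the complete clamp step could look like, assuming PIL resizing with LANCZOS resampling (the filter actually used in app.py is not visible in this diff):

from PIL import Image

def limit_image_size(input_image: Image.Image, max_size: int = 512) -> Image.Image:
    """Downscale so the longer side is at most max_size pixels, keeping aspect ratio."""
    if max(input_image.size) > max_size:
        ratio = max_size / max(input_image.size)
        new_size = (int(input_image.width * ratio), int(input_image.height * ratio))
        input_image = input_image.resize(new_size, Image.LANCZOS)
    return input_image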
@@ -200,6 +197,7 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
         }
     )
 
+    # Reduce the number of video frames
     video = render_utils.render_video(outputs['gaussian'][0], num_frames=30)['color']
     video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=30)['normal']
     video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
@@ -225,7 +223,7 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
 @spaces.GPU
 def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
     try:
-        # Flux pipeline
+        # Load the Flux pipeline
         flux_pipe = load_flux_pipe()
 
         if torch.cuda.is_available():
@@ -416,6 +414,5 @@ if __name__ == "__main__":
         share=True,
         max_threads=1,
         show_error=True,
         cache_examples=False
-
     )