Update app.py
app.py CHANGED
@@ -11,7 +11,8 @@ import gradio as gr
 import torch
 from diffusers import FluxPipeline
 from PIL import Image
-
+from transformers import pipeline
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 # Hugging Face token setup
 HF_TOKEN = os.getenv("HF_TOKEN")
 if HF_TOKEN is None:
@@ -195,14 +196,29 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     )
 
     gallery.value = load_gallery()
-
+
+
     @spaces.GPU
     def process_and_save_image(height, width, steps, scales, prompt, seed):
         global pipe
+
+        # Detect Korean text and translate it
+        def contains_korean(text):
+            return any(ord('가') <= ord(c) <= ord('힣') for c in text)
+
+        # Prompt preprocessing
+        if contains_korean(prompt):
+            # Translate Korean to English
+            translated = translator(prompt)[0]['translation_text']
+            prompt = translated
+
+        # Improve the prompt format
+        formatted_prompt = f"wbgmsst, 3D, {prompt} ,white background"
+
         with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
             try:
                 generated_image = pipe(
-                    prompt=[prompt],
+                    prompt=[formatted_prompt],  # use the modified prompt
                     generator=torch.Generator().manual_seed(int(seed)),
                     num_inference_steps=int(steps),
                     guidance_scale=float(scales),
@@ -220,6 +236,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
                 print(f"Error in image generation: {str(e)}")
                 return None, load_gallery()
 
+
+
     def update_seed():
         return get_random_seed()
 
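For reference, the preprocessing added in this commit can be exercised on its own, outside the Gradio app. The sketch below assumes transformers (with a torch backend) is installed and reuses the model name and logic from the diff; preprocess_prompt and the sample Korean string are hypothetical illustration, not part of the app:

from transformers import pipeline

# Translation pipeline as set up in the diff (Korean -> English)
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def contains_korean(text):
    # Hangul syllables span U+AC00 ('가') through U+D7A3 ('힣')
    return any(ord('가') <= ord(c) <= ord('힣') for c in text)

def preprocess_prompt(prompt):  # hypothetical wrapper, for illustration only
    if contains_korean(prompt):
        prompt = translator(prompt)[0]['translation_text']
    # Same trigger-word formatting as the diff
    return f"wbgmsst, 3D, {prompt} ,white background"

# Prints the translated, formatted prompt that would be passed to the pipe
print(preprocess_prompt("하얀 배경의 귀여운 로봇"))

Note that the check covers only precomposed Hangul syllables (U+AC00 through U+D7A3), which is sufficient for typical Korean prompts but will not match isolated jamo.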