ginipick committed on
Commit
2045d7f
β€’
1 Parent(s): 31aa93b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -3
app.py CHANGED
@@ -11,7 +11,8 @@ import gradio as gr
11
  import torch
12
  from diffusers import FluxPipeline
13
  from PIL import Image
14
-
 
15
  # Hugging Face 토큰 μ„€μ •
16
  HF_TOKEN = os.getenv("HF_TOKEN")
17
  if HF_TOKEN is None:
@@ -195,14 +196,29 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
195
  )
196
 
197
  gallery.value = load_gallery()
198
-
 
199
  @spaces.GPU
200
  def process_and_save_image(height, width, steps, scales, prompt, seed):
201
  global pipe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
  with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
203
  try:
204
  generated_image = pipe(
205
- prompt=[prompt],
206
  generator=torch.Generator().manual_seed(int(seed)),
207
  num_inference_steps=int(steps),
208
  guidance_scale=float(scales),
@@ -220,6 +236,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
220
  print(f"Error in image generation: {str(e)}")
221
  return None, load_gallery()
222
 
 
 
223
  def update_seed():
224
  return get_random_seed()
225
 
 
11
  import torch
12
  from diffusers import FluxPipeline
13
  from PIL import Image
14
+ from transformers import pipeline
15
+ translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
16
  # Hugging Face 토큰 μ„€μ •
17
  HF_TOKEN = os.getenv("HF_TOKEN")
18
  if HF_TOKEN is None:
 
196
  )
197
 
198
  gallery.value = load_gallery()
199
+
200
+
201
  @spaces.GPU
202
  def process_and_save_image(height, width, steps, scales, prompt, seed):
203
  global pipe
204
+
205
+ # ν•œκΈ€ 감지 및 λ²ˆμ—­
206
+ def contains_korean(text):
207
+ return any(ord('κ°€') <= ord(c) <= ord('힣') for c in text)
208
+
209
+ # ν”„λ‘¬ν”„νŠΈ μ „μ²˜λ¦¬
210
+ if contains_korean(prompt):
211
+ # ν•œκΈ€μ„ μ˜μ–΄λ‘œ λ²ˆμ—­
212
+ translated = translator(prompt)[0]['translation_text']
213
+ prompt = translated
214
+
215
+ # ν”„λ‘¬ν”„νŠΈ ν˜•μ‹ κ°•μ œ
216
+ formatted_prompt = f"wbgmsst, 3D, {prompt} ,white background"
217
+
218
  with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
219
  try:
220
  generated_image = pipe(
221
+ prompt=[formatted_prompt], # μˆ˜μ •λœ ν”„λ‘¬ν”„νŠΈ μ‚¬μš©
222
  generator=torch.Generator().manual_seed(int(seed)),
223
  num_inference_steps=int(steps),
224
  guidance_scale=float(scales),
 
236
  print(f"Error in image generation: {str(e)}")
237
  return None, load_gallery()
238
 
239
+
240
+
241
  def update_seed():
242
  return get_random_seed()
243