salomonsky committed on
Commit
d2a5152
verified
1 Parent(s): ee8fb83

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -33
app.py CHANGED
@@ -36,16 +36,24 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
36
  try:
37
  client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
38
  result = client.predict(
39
- input_image=handle_file(img_path), prompt=prompt, negative_prompt="",
40
- seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6,
41
- controlnet_decay=1, condition_scale=6, tile_width=112,
42
- tile_height=144, denoise_strength=0.35, num_inference_steps=18,
43
- solver="DDIM", api_name="/process"
44
  )
45
  return result[1] if isinstance(result, list) and len(result) > 1 else None
46
  except Exception as e:
47
  return None
48
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
50
  model = enable_lora(lora_model, basemodel) if process_lora else basemodel
51
  improved_prompt = await improve_prompt(prompt)
@@ -62,12 +70,8 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
62
  progress_bar.empty()
63
  return [image, None, combined_prompt]
64
 
65
- image_path = DATA_PATH / f"image_{seed}.jpg"
66
- image.save(image_path, format="JPEG")
67
-
68
- prompt_file_path = DATA_PATH / f"prompt_{seed}.txt"
69
- with open(prompt_file_path, "w") as prompt_file:
70
- prompt_file.write(combined_prompt)
71
 
72
  if process_upscale:
73
  upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
@@ -75,7 +79,7 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
75
  upscale_image = Image.open(upscale_image_path)
76
  upscale_image.save(DATA_PATH / f"upscale_image_{seed}.jpg", format="JPEG")
77
  progress_bar.progress(100)
78
- image_path.unlink()
79
  return [str(DATA_PATH / f"upscale_image_{seed}.jpg"), str(prompt_file_path)]
80
  else:
81
  progress_bar.empty()
@@ -86,22 +90,16 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
86
 
87
  async def improve_prompt(prompt):
88
  try:
89
- instruction = ("With this idea, describe in English a detailed txt2img prompt in a single paragraph of up to 100 characters maximum, developing atmosphere, characters, lighting, and cameras.")
90
  formatted_prompt = f"{prompt}: {instruction}"
91
  response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
92
  improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
93
-
94
- if len(improved_text) > 200:
95
- improved_text = improved_text[:200]
96
-
97
- return improved_text
98
  except Exception as e:
99
  return f"Error mejorando el prompt: {e}"
100
 
101
  def get_storage():
102
- files = [{"name": str(file.resolve()), "size": file.stat().st_size}
103
- for file in DATA_PATH.glob("*.jpg") if file.is_file()]
104
-
105
  usage = sum([f['size'] for f in files])
106
  return [f["name"] for f in files], f"Uso total: {usage/(1024.0 ** 3):.3f}GB"
107
 
@@ -109,12 +107,6 @@ def get_prompts():
109
  prompt_files = [file for file in DATA_PATH.glob("*.txt") if file.is_file()]
110
  return {file.stem.replace("prompt_", ""): file for file in prompt_files}
111
 
112
- def run_gen():
113
- loop = asyncio.new_event_loop()
114
- asyncio.set_event_loop(loop)
115
- prompt_to_use = st.session_state.get('improved_prompt', prompt)
116
- return loop.run_until_complete(gen(prompt_to_use, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora))
117
-
118
  def delete_image(image_path):
119
  try:
120
  if Path(image_path).exists():
@@ -125,13 +117,21 @@ def delete_image(image_path):
125
  except Exception as e:
126
  st.error(f"Error al borrar la imagen: {e}")
127
 
 
 
 
 
 
 
 
128
  st.set_page_config(layout="wide")
129
  st.title("Generador de Imágenes FLUX")
130
  prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=200)
131
 
 
132
  with st.sidebar.expander("Opciones avanzadas", expanded=False):
133
- basemodel = st.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"])
134
- lora_model = st.selectbox("LORA Realismo", ["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"])
135
  format_option = st.selectbox("Formato", ["9:16", "16:9"])
136
  process_lora = st.checkbox("Procesar LORA")
137
  process_upscale = st.checkbox("Procesar Escalador")
@@ -150,7 +150,7 @@ else:
150
  if st.sidebar.button("Mejorar prompt"):
151
  improved_prompt = asyncio.run(improve_prompt(prompt))
152
  st.session_state.improved_prompt = improved_prompt
153
- st.write(f"{improved_prompt}")
154
 
155
  if st.sidebar.button("Generar Imagen"):
156
  with st.spinner("Generando imagen..."):
@@ -182,9 +182,15 @@ for idx, file in enumerate(files):
182
  image = Image.open(file)
183
  prompt_file = prompts.get(Path(file).stem.replace("image_", ""), None)
184
  prompt_text = Path(prompt_file).read_text() if prompt_file else "No disponible"
 
185
  st.image(image, caption=f"Imagen {idx+1}")
186
  st.write(f"Prompt: {prompt_text}")
 
187
  if st.button(f"Borrar Imagen {idx+1}", key=f"delete_{idx}"):
188
- os.remove(file)
189
- if prompt_file:
190
- os.remove(prompt_file)
 
 
 
 
 
36
  try:
37
  client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
38
  result = client.predict(
39
+ input_image=handle_file(img_path), prompt=prompt, upscale_factor=upscale_factor
 
 
 
 
40
  )
41
  return result[1] if isinstance(result, list) and len(result) > 1 else None
42
  except Exception as e:
43
  return None
44
 
45
+ # Función para guardar el prompt
46
+ def save_prompt(prompt_text, seed):
47
+ try:
48
+ prompt_file_path = DATA_PATH / f"prompt_{seed}.txt"
49
+ with open(prompt_file_path, "w") as prompt_file:
50
+ prompt_file.write(prompt_text)
51
+ return prompt_file_path
52
+ except Exception as e:
53
+ st.error(f"Error al guardar el prompt: {e}")
54
+ return None
55
+
56
+ # Función principal de generación de imágenes
57
  async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
58
  model = enable_lora(lora_model, basemodel) if process_lora else basemodel
59
  improved_prompt = await improve_prompt(prompt)
 
70
  progress_bar.empty()
71
  return [image, None, combined_prompt]
72
 
73
+ image_path = save_image(image, seed)
74
+ prompt_file_path = save_prompt(combined_prompt, seed)
 
 
 
 
75
 
76
  if process_upscale:
77
  upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
 
79
  upscale_image = Image.open(upscale_image_path)
80
  upscale_image.save(DATA_PATH / f"upscale_image_{seed}.jpg", format="JPEG")
81
  progress_bar.progress(100)
82
+ image_path.unlink() # Borrar la imagen original si se escaló
83
  return [str(DATA_PATH / f"upscale_image_{seed}.jpg"), str(prompt_file_path)]
84
  else:
85
  progress_bar.empty()
 
90
 
91
  async def improve_prompt(prompt):
92
  try:
93
+ instruction = ("With this idea, describe in English a detailed txt2img prompt...")
94
  formatted_prompt = f"{prompt}: {instruction}"
95
  response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
96
  improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
97
+ return improved_text[:200] if len(improved_text) > 200 else improved_text
 
 
 
 
98
  except Exception as e:
99
  return f"Error mejorando el prompt: {e}"
100
 
101
  def get_storage():
102
+ files = [{"name": str(file.resolve()), "size": file.stat().st_size} for file in DATA_PATH.glob("*.jpg") if file.is_file()]
 
 
103
  usage = sum([f['size'] for f in files])
104
  return [f["name"] for f in files], f"Uso total: {usage/(1024.0 ** 3):.3f}GB"
105
 
 
107
  prompt_files = [file for file in DATA_PATH.glob("*.txt") if file.is_file()]
108
  return {file.stem.replace("prompt_", ""): file for file in prompt_files}
109
 
 
 
 
 
 
 
110
  def delete_image(image_path):
111
  try:
112
  if Path(image_path).exists():
 
117
  except Exception as e:
118
  st.error(f"Error al borrar la imagen: {e}")
119
 
120
+ def run_gen():
121
+ loop = asyncio.new_event_loop()
122
+ asyncio.set_event_loop(loop)
123
+ prompt_to_use = st.session_state.get('improved_prompt', prompt)
124
+ return loop.run_until_complete(gen(prompt_to_use, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora))
125
+
126
+ # Configuración de la página y sidebar
127
  st.set_page_config(layout="wide")
128
  st.title("Generador de Imágenes FLUX")
129
  prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=200)
130
 
131
+ # Opciones avanzadas
132
  with st.sidebar.expander("Opciones avanzadas", expanded=False):
133
+ basemodel = st.selectbox("Modelo Base", ["FLUX.1", "FLUX.1-DEV"])
134
+ lora_model = st.selectbox("LORA Realismo", ["FLUX.1-dev-LoRA", "RealismLora"])
135
  format_option = st.selectbox("Formato", ["9:16", "16:9"])
136
  process_lora = st.checkbox("Procesar LORA")
137
  process_upscale = st.checkbox("Procesar Escalador")
 
150
  if st.sidebar.button("Mejorar prompt"):
151
  improved_prompt = asyncio.run(improve_prompt(prompt))
152
  st.session_state.improved_prompt = improved_prompt
153
+ st.write(f"Prompt mejorado: {improved_prompt}")
154
 
155
  if st.sidebar.button("Generar Imagen"):
156
  with st.spinner("Generando imagen..."):
 
182
  image = Image.open(file)
183
  prompt_file = prompts.get(Path(file).stem.replace("image_", ""), None)
184
  prompt_text = Path(prompt_file).read_text() if prompt_file else "No disponible"
185
+
186
  st.image(image, caption=f"Imagen {idx+1}")
187
  st.write(f"Prompt: {prompt_text}")
188
+
189
  if st.button(f"Borrar Imagen {idx+1}", key=f"delete_{idx}"):
190
+ try:
191
+ os.remove(file)
192
+ if prompt_file:
193
+ os.remove(prompt_file)
194
+ st.success(f"Imagen {idx+1} y su prompt fueron borrados.")
195
+ except Exception as e:
196
+ st.error(f"Error al borrar la imagen o prompt: {e}")