salomonsky committed (verified)
Commit a1dbcb7
1 Parent(s): dd1b42e

Update app.py

Files changed (1):
  app.py +204 -142

app.py CHANGED
@@ -1,79 +1,86 @@
- import os
- import random
  from pathlib import Path
- from PIL import Image, ImageOps
  import streamlit as st
  from huggingface_hub import InferenceClient, AsyncInferenceClient
- import face_recognition

- TEMP_PATH = Path("./temp")
- TEMP_PATH.mkdir(parents=True, exist_ok=True)
- st.set_page_config(layout="wide")
  client = AsyncInferenceClient()
  llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

- def save_prompt(description):
-     prompt_file = TEMP_PATH / "prompts.txt"
-     with open(prompt_file, "a") as file:
-         file.write(f"{description}\n")
-
- def save_image(image, file_name, prompt=None):
-     image_path = TEMP_PATH / file_name
-     if image_path.exists():
-         st.warning(f"La imagen '{file_name}' ya existe en la galería. No se guardó.")
-         return None
-     image.save(image_path, format="JPEG")
-     if prompt:
-         save_prompt(f"{file_name}: {prompt}")
-     return image_path

- def generate_thumbnail(image_file, size=(150, 150)):
      try:
-         with Image.open(image_file) as img:
-             img.thumbnail(size, Image.LANCZOS)
-             thumbnail_path = TEMP_PATH / f"{Path(image_file).stem}_thumbnail.jpg"
-             img.save(thumbnail_path, format="JPEG")
-             return thumbnail_path
      except Exception as e:
-         st.error(f"Error al generar thumbnail: {e}")
          return None

- def show_image_gallery(images):
-     cols = st.columns(8)
-     for i, image_file in enumerate(images):
-         with cols[i % 8]:
-             thumbnail_image = Image.open(image_file)
-             st.image(thumbnail_image, caption=f"Imagen {i + 1}", use_column_width=True)
-             if st.button(f"Eliminar Imagen {i + 1}", key=f"delete_{i + 1}"):
-                 os.remove(image_file)
-                 st.warning(f"Imagen {i + 1} eliminada")
-
- async def improve_prompt(prompt):
-     try:
-         instructions = [
-             "With this words, create a photorealistic description for a detailed txt2img prompt in English in 200 characters maximum",
-             "With this idea, write a creative, realistic, and detailed text-to-image prompt in English in 200 characters maximum",
-             "With this text, generate a descriptive and True to life txt2img prompt in English in 200 characters maximum",
-             "With my idea, describe a photorealistic scene with detailed illumination for a txt2img prompt in English in 200 characters maximum",
-             "With this concept, give a realistic, elegant txt2img prompt in English, emphasizing photorealism in 200 characters maximum",
-             "With this perspective, conform a visually dynamic and hyperrealistic txt2img prompt in English in 200 characters maximum",
-             "With this inspiration, realize a cinematic txt2img prompt in English with hyperrealistic elements in 200 characters maximum",
-             "With my idea, make a lifelike and txt2img prompt in English, focusing on photorealistic depth in 200 characters maximum"
-         ]
-
-         instruction = random.choice(instructions)
-         formatted_prompt = f"{prompt}: {instruction}"
-         response = llm_client.text_generation(formatted_prompt, max_new_tokens=256)
-         return response['generated_text'][:200] if 'generated_text' in response else response.strip()
-     except Exception as e:
-         return f"Error mejorando el prompt: {e}"

- async def gen(prompts, width, height, model_name, num_variants=1, use_enhanced=True, seed=-1):
      images = []
      try:
          for idx, prompt in enumerate(prompts):
-             image, seed_used = await generate_image(prompt, width, height, model_name, seed)
-             image_path = save_image(image, f"generated_image_{seed_used}.jpg", prompt)
              if image_path:
                  st.success(f"Imagen {idx + 1} generada")
                  images.append(str(image_path))
@@ -81,19 +88,87 @@ async def gen(prompts, width, height, model_name, num_variants=1, use_enhanced=T
          st.error(f"Error al generar imágenes: {e}")
      return images

- async def generate_image(prompt, width, height, model_name, seed):
-     if seed == -1:
-         seed = random.randint(0, 2147483647)
-     image = await client.text_to_image(prompt=prompt, height=height, width=width, model=model_name, seed=seed)
-     return image, seed
-
  def list_saved_images():
-     return sorted(TEMP_PATH.glob("*.jpg"), key=os.path.getmtime, reverse=True)

  def login_form():
      st.title("Iniciar Sesión")
      username = st.text_input("Usuario", value="admin")
-     password = st.text_input("Contraseña", value="flux3x", type="password")
      if st.button("Iniciar Sesión"):
          if authenticate_user(username, password):
              st.success("Autenticación exitosa.")
@@ -101,92 +176,79 @@ def login_form():
          else:
              st.error("Credenciales incorrectas. Intenta de nuevo.")

- async def generate_variations(prompt, num_variants, use_enhanced, style):
-     prompts = set()
-     while len(prompts) < num_variants:
-         enhanced_prompt = await improve_prompt(f"{prompt}, estilo: {style}") if use_enhanced else f"{prompt}, estilo: {style}"
-         prompts.add(enhanced_prompt)
-     return list(prompts)
-
- def authenticate_user(username, password):
-     return username == "admin" and password == "flux3x"
-
- def swap_faces(source_image_path, target_image_path, output_path):
      try:
-         source_image = face_recognition.load_image_file(source_image_path)
-         target_image = face_recognition.load_image_file(target_image_path)
-
-         source_face_locations = face_recognition.face_locations(source_image)
-         target_face_locations = face_recognition.face_locations(target_image)
-
-         if not source_face_locations or not target_face_locations:
-             raise ValueError("No se detectaron rostros en una de las imágenes.")
-
-         source_face_encodings = face_recognition.face_encodings(source_image, source_face_locations)
-         target_face_encodings = face_recognition.face_encodings(target_image, target_face_locations)
-
-         # En este ejemplo, solo se realiza un intercambio básico del primer rostro encontrado
-         for target_location, target_encoding in zip(target_face_locations, target_face_encodings):
-             match = face_recognition.compare_faces(source_face_encodings, target_encoding, tolerance=0.6)
-             if any(match):
-                 top, right, bottom, left = target_location
-                 target_image[top:bottom, left:right] = source_image[top:bottom, left:right]
-
-         swapped_image = Image.fromarray(target_image)
-         swapped_image.save(output_path)
-         return output_path
-
      except Exception as e:
-         st.error(f"Error al intercambiar rostros: {e}")
          return None

  async def main():
      if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
          login_form()
          return

      prompt = st.sidebar.text_area("Descripción de la imagen", height=150, max_chars=500)
-     format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9"])
-     width, height = (720, 1280) if format_option == "9:16" else (1280, 720)
-     st.sidebar.subheader("Configuración de Imagen")
-     model_option = st.sidebar.selectbox("Modelo", ["black-forest-labs/FLUX.1-schnell", "prashanth970/flux-lora-uncensored"])
-     style_type = st.sidebar.selectbox("Estilo", ["Realismo", "Hiperrealismo", "Photorealismo"])
-     st.sidebar.subheader("Parámetros de Generación")
-     randomize_seed = st.sidebar.checkbox("Randomize Seed", value=True)
-     seed = -1 if randomize_seed else st.sidebar.number_input("Seed", value=42, min_value=0, max_value=2147483647)
-     prompt_checkbox = st.sidebar.checkbox("Mejorar Prompt", value=True)
-     num_variants = st.sidebar.slider("Número de imágenes a generar", 1, 8, 1)
-
      if prompt_checkbox:
-         prompts = await generate_variations(f"{prompt}, estilo: {style_type}", num_variants, prompt_checkbox, style_type)
      else:
-         prompts = [f"{prompt}, estilo: {style_type}"]

-     if st.sidebar.button("Generar Imágenes"):
-         try:
-             results = await gen(prompts, width, height, model_option, num_variants, prompt_checkbox, seed)
-             for result in results:
-                 st.image(result, caption="Imagen Generada", use_column_width=True)
-         except Exception as e:
-             st.error(f"Error al generar las imágenes: {str(e)}")

-     images = list_saved_images()
-     if images:
-         show_image_gallery(images)
-
-         st.sidebar.subheader("Herramientas Adicionales")
-         source_image = st.sidebar.file_uploader("Cargar Imagen Fuente para FaceSwap", type=["jpg", "jpeg", "png"])
-         target_image_path = st.sidebar.selectbox("Seleccionar Imagen Objetivo", images)
-
-         if source_image and target_image_path:
-             output_path = TEMP_PATH / "face_swap_result.jpg"
-             if st.sidebar.button("Intercambiar Rostros"):
-                 result = swap_faces(source_image, target_image_path, output_path)
-                 if result:
-                     st.image(result, caption="Resultado de FaceSwap", use_column_width=True)
-     else:
-         st.info("No hay imágenes disponibles en la galería.")

- if __name__ == "__main__":
-     import asyncio
-     asyncio.run(main())
@@ -1,79 +1,86 @@
  from pathlib import Path
+ from PIL import Image
  import streamlit as st
+ import insightface
+ from insightface.app import FaceAnalysis
  from huggingface_hub import InferenceClient, AsyncInferenceClient
+ import asyncio
+ import os
+ import random
+ import numpy as np
+ import yaml

+ try:
+     with open("config.yaml", "r") as file:
+         credentials = yaml.safe_load(file)
+ except Exception as e:
+     st.error(f"Error al cargar el archivo de configuración: {e}")
+     credentials = {"username": "", "password": ""}
+
+ MAX_SEED = np.iinfo(np.int32).max
  client = AsyncInferenceClient()
  llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+ DATA_PATH = Path("./data")
+ DATA_PATH.mkdir(exist_ok=True)
+ PREDEFINED_SEED = random.randint(0, MAX_SEED)
+ HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")

+ if not HF_TOKEN_UPSCALER:
+     st.warning("HF_TOKEN_UPSCALER no está configurado. Algunas funcionalidades pueden no funcionar.")

+ def get_upscale_finegrain(prompt, img_path, upscale_factor):
      try:
+         upscale_client = InferenceClient("fal/AuraSR-v2", hf_token=HF_TOKEN_UPSCALER)
+         result = upscale_client.predict(input_image=handle_file(img_path), prompt=prompt, upscale_factor=upscale_factor)
+         return result[1] if isinstance(result, list) and len(result) > 1 else None
      except Exception as e:
+         st.error(f"Error al mejorar la imagen: {e}")
          return None
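
Note on the upscaler call above: `handle_file` is never imported and `InferenceClient` has no `predict` method; both belong to `gradio_client`, the usual way to call a Gradio Space. A minimal sketch under that assumption (the endpoint name and argument layout are illustrative, not taken from this commit):

    # Hedged sketch: call an upscaler Space via gradio_client rather than
    # huggingface_hub.InferenceClient. Endpoint and argument names are assumptions.
    from gradio_client import Client, handle_file

    def upscale_via_space(img_path, hf_token=None):
        space = Client("fal/AuraSR-v2", hf_token=hf_token)  # Space id as used above
        try:
            result = space.predict(handle_file(img_path), api_name="/predict")
            return result  # typically a local path to the enhanced image
        except Exception:
            return None
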

+ def authenticate_user(username, password):
+     return username == credentials["username"] and password == credentials["password"]
+
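
`authenticate_user` relies on a `config.yaml` sitting next to `app.py`, which is not part of this commit. A minimal sketch of generating one with PyYAML (the values are placeholders):

    # Hedged sketch: the keys below are the ones authenticate_user reads;
    # the values are placeholders, not credentials from this repo.
    import yaml

    with open("config.yaml", "w") as fh:
        yaml.safe_dump({"username": "admin", "password": "change-me"}, fh)
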
+ def prepare_face_app():
+     app = FaceAnalysis(name='buffalo_l')
+     app.prepare(ctx_id=0, det_size=(640, 640))
+     swapper = insightface.model_zoo.get_model('onix.onnx')
+     return app, swapper
+
+ app, swapper = prepare_face_app()
+
+ def sort_faces(faces):
+     return sorted(faces, key=lambda x: x.bbox[0])
+
+ def get_face(faces, face_id):
+     if not faces or len(faces) < face_id:
+         raise ValueError("Rostro no disponible.")
+     return faces[face_id - 1]
+
+ def swap_faces(source_image, source_face_index, destination_image, destination_face_index):
+     faces = sort_faces(app.get(source_image))
+     source_face = get_face(faces, source_face_index)
+
+     res_faces = sort_faces(app.get(destination_image))
+     if destination_face_index > len(res_faces) or destination_face_index < 1:
+         raise ValueError("Índice de rostro de destino no válido.")
+
+     res_face = get_face(res_faces, destination_face_index)
+     result = swapper.get(destination_image, res_face, source_face, paste_back=True)
+     return result
+
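
`FaceAnalysis.get()` and the swapper's `get()` operate on BGR `numpy` arrays (the format `cv2.imread` produces), while the UI code further down passes PIL images from `Image.open` straight into `swap_faces`. A minimal conversion sketch, assuming `'onix.onnx'` is a local copy of the stock `inswapper_128.onnx` swapper:

    # Hedged sketch: convert PIL images to BGR arrays before calling swap_faces().
    import numpy as np
    from PIL import Image

    def pil_to_bgr(img):
        rgb = np.array(img.convert("RGB"))   # H x W x 3, RGB channel order
        return rgb[:, :, ::-1]               # flip channels to BGR for insightface

    # Example usage (file names are illustrative):
    # src = pil_to_bgr(Image.open("face.jpg"))
    # dst = pil_to_bgr(Image.open("data/generated_image_123.jpg"))
    # out = swap_faces(src, 1, dst, 1)
    # Image.fromarray(out[:, :, ::-1]).save("data/face_swap_result.jpg")
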
+ async def generate_image(prompt, width, height, seed, model_name):
+     if seed == -1:
+         seed = random.randint(0, MAX_SEED)
+     image = await client.text_to_image(prompt=prompt, height=height, width=width, model=model_name)
+     return image, seed

+ async def gen(prompts, width, height, model_name, num_variants=1, use_enhanced=True):
      images = []
      try:
          for idx, prompt in enumerate(prompts):
+             seed = random.randint(0, MAX_SEED)
+             image, seed = await generate_image(prompt, width, height, seed, model_name)
+             image_path = save_image(image, f"generated_image_{seed}.jpg")
              if image_path:
                  st.success(f"Imagen {idx + 1} generada")
                  images.append(str(image_path))
@@ -81,19 +88,87 @@ async def gen(prompts, width, height, model_name, num_variants=1, use_enhanced=T
          st.error(f"Error al generar imágenes: {e}")
      return images
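
Unlike the removed `generate_image`, the new one never forwards `seed` to the API, so the seed baked into the saved file name does not actually reproduce the image. `text_to_image` accepts a `seed` argument (the deleted version of this function used it); a minimal sketch that keeps the call reproducible:

    # Hedged sketch: pass the seed through to text_to_image, as the removed
    # generate_image did. Reuses client, random and MAX_SEED from this module.
    async def generate_image_seeded(prompt, width, height, seed, model_name):
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        image = await client.text_to_image(
            prompt=prompt, height=height, width=width, model=model_name, seed=seed
        )
        return image, seed
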

  def list_saved_images():
+     return list(DATA_PATH.glob("*.jpg"))
+
+ def display_gallery():
+     st.header("Galería de Imágenes Guardadas")
+     images = list_saved_images()
+     if images:
+         cols = st.columns(8)
+         for i, image_file in enumerate(images):
+             with cols[i % 8]:
+                 st.image(str(image_file), caption=image_file.name, use_column_width=True)
+                 prompt = get_prompt_for_image(image_file.name)
+                 st.write(prompt[:300])
+
+                 if st.button(f"FaceSwap", key=f"select_{i}_{image_file.name}"):
+                     st.session_state['generated_image_path'] = str(image_file)
+                     st.success("Imagen seleccionada")
+
+                 if st.button(f"Borrar", key=f"delete_{i}_{image_file.name}"):
+                     if os.path.exists(image_file):
+                         os.remove(image_file)
+                         st.success("Imagen borrada")
+                         display_gallery()
+                     else:
+                         st.warning("La imagen no existe.")
+     else:
+         st.info("No hay imágenes guardadas.")
+
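
Calling `display_gallery()` from inside its own button handler re-creates the remaining widgets with keys that already exist in the current run. The usual Streamlit pattern is to delete the file and request a fresh run; a sketch of that branch, assuming a Streamlit version that provides `st.rerun()`:

    # Hedged sketch: replace the recursive display_gallery() call with a rerun.
    if st.button("Borrar", key=f"delete_{i}_{image_file.name}"):
        if image_file.exists():
            image_file.unlink()        # Path-based delete of the gallery file
            st.success("Imagen borrada")
            st.rerun()                 # rebuild the gallery on a clean script run
        else:
            st.warning("La imagen no existe.")
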
+ def save_prompt(prompt):
+     with open(DATA_PATH / "prompts.txt", "a") as f:
+         f.write(prompt + "\n")
+     st.success("Prompt guardado.")
+
+ def run_async(func, *args):
+     return asyncio.run(func(*args))
+
+ async def improve_prompt(prompt):
+     try:
+         instructions = [
+             "With my idea create a vibrant description for a detailed txt2img prompt, 300 characters max.",
+             "With my idea write a creative and detailed text-to-image prompt in English, 300 characters max.",
+             "With my idea generate a descriptive and visual txt2img prompt in English, 300 characters max.",
+             "With my idea describe a photorealistic with illumination txt2img prompt in English, 300 characters max.",
+             "With my idea give a realistic and elegant txt2img prompt in English, 300 characters max.",
+             "With my idea conform a visually dynamic and surreal txt2img prompt in English, 300 characters max.",
+             "With my idea realize an artistic and cinematic txt2img prompt in English, 300 characters max.",
+             "With my idea make a narrative and immersive txt2img prompt in English, 300 characters max."
+         ]
+         instruction = random.choice(instructions)
+         formatted_prompt = f"{prompt}: {instruction}"
+         response = llm_client.text_generation(formatted_prompt, max_new_tokens=100)
+         return response['generated_text'][:100] if 'generated_text' in response else response.strip()
+     except Exception as e:
+         return f"Error mejorando el prompt: {e}"
+
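
With the default `details=False`, `InferenceClient.text_generation` returns a plain string, so the `'generated_text' in response` check above is effectively a substring test and the 100-character cap is rarely applied. A minimal sketch of the last two lines under that assumption:

    # Hedged sketch: treat the text_generation result as a string and truncate it.
    response = llm_client.text_generation(formatted_prompt, max_new_tokens=100)
    return response.strip()[:100]
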
+ def generate_variations(prompt, num_variants, use_enhanced):
+     prompts = set()
+     while len(prompts) < num_variants:
+         if use_enhanced:
+             enhanced_prompt = improve_prompt(prompt)
+             prompts.add(enhanced_prompt)
+         else:
+             prompts.add(prompt)
+     return list(prompts)
+
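
`improve_prompt` is a coroutine, so `generate_variations` as written collects un-awaited coroutine objects instead of strings. The removed version of `main()` awaited an async `generate_variations`; a minimal sketch along those lines (callers would then use `await generate_variations(...)`):

    # Hedged sketch: await the enhancer so the set holds strings, not coroutines.
    async def generate_variations(prompt, num_variants, use_enhanced):
        prompts = set()
        while len(prompts) < num_variants:
            if use_enhanced:
                prompts.add(await improve_prompt(prompt))
            else:
                prompts.add(prompt)
                break  # the same literal prompt can only be added once
        return list(prompts)
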
+ def get_prompt_for_image(image_name):
+     prompts = {}
+     try:
+         with open(DATA_PATH / "prompts.txt", "r") as f:
+             for line in f:
+                 if line.startswith(image_name):
+                     prompts[image_name] = line.split(": ", 1)[1].strip()
+     except FileNotFoundError:
+         return "No hay prompt asociado."
+
+     return prompts.get(image_name, "No hay prompt asociado.")

  def login_form():
      st.title("Iniciar Sesión")
      username = st.text_input("Usuario", value="admin")
+     password = st.text_input("Contraseña", value="flux3x", type="password")
      if st.button("Iniciar Sesión"):
          if authenticate_user(username, password):
              st.success("Autenticación exitosa.")
@@ -101,92 +176,79 @@ def login_form():
          else:
              st.error("Credenciales incorrectas. Intenta de nuevo.")

+ def save_image(image, filename):
      try:
+         image_path = DATA_PATH / filename
+         image.save(image_path)
+         return image_path
      except Exception as e:
+         st.error(f"Error al guardar la imagen: {e}")
          return None

+ def upload_image_to_gallery():
+     uploaded_image = st.sidebar.file_uploader("Sube una imagen a la galería", type=["jpg", "jpeg", "png"])
+     if uploaded_image:
+         image = Image.open(uploaded_image)
+         image_path = save_image(image, f"{uploaded_image.name}")
+         if image_path:
+             save_prompt("uploaded by user")
+             st.sidebar.success(f"Imagen subida: {image_path}")
+
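
Streamlit expects `st.set_page_config()` to be the first Streamlit call of a script run, but the module-level config loading above may already have emitted `st.error`/`st.warning` before `main()` executes. A minimal sketch of the safer placement:

    # Hedged sketch: configure the page at import time, before any other
    # Streamlit call (including the module-level st.error/st.warning above).
    import streamlit as st

    st.set_page_config(layout="wide")
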
  async def main():
+     st.set_page_config(layout="wide")
+
      if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
          login_form()
          return

+     st.title("Flux +Upscale +Prompt Enhancer +FaceSwap")
+     generated_image_path = st.session_state.get('generated_image_path')
      prompt = st.sidebar.text_area("Descripción de la imagen", height=150, max_chars=500)
+     format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9", "1:1"])
+     model_option = st.sidebar.selectbox("Modelo", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev"])
+     prompt_checkbox = st.sidebar.checkbox("Mejorar Prompt")
+     upscale_checkbox = st.sidebar.checkbox("Escalar imagen")
+     width, height = (720, 1280) if format_option == "9:16" else (1280, 720) if format_option == "16:9" else (1280, 1280)
+     num_variants = st.sidebar.slider("Número de imágenes a generar", 1, 8, 1) if prompt_checkbox else 1
+
      if prompt_checkbox:
+         with st.spinner("Generando prompts mejorados..."):
+             prompts = generate_variations(prompt, num_variants, True)
      else:
+         prompts = [prompt]

+     upload_image_to_gallery()

+     if st.sidebar.button("Generar Imágenes"):
+         with st.spinner("Generando imágenes..."):
+             try:
+                 results = await gen(prompts, width, height, model_option, num_variants, prompt_checkbox)
+                 st.session_state['generated_image_paths'] = results
+                 for result in results:
+                     st.image(result, caption="Imagen Generada")
+             except Exception as e:
+                 st.error(f"Error al generar las imágenes: {str(e)}")
+
+     if generated_image_path:
+         if upscale_checkbox:
+             with st.spinner("Escalando imagen..."):
+                 try:
+                     upscale_image_path = get_upscale_finegrain("Upscale", generated_image_path, 2)
+                     if upscale_image_path:
+                         st.image(upscale_image_path, caption="Imagen Escalada")
+                 except Exception as e:
+                     st.error(f"Error al escalar la imagen: {str(e)}")
+
+         st.header("Intercambio de Rostros")
+         source_image_file = st.file_uploader("Imagen de Origen", type=["jpg", "jpeg", "png"])
+
+         if source_image_file is not None:
+             try:
+                 source_image = Image.open(source_image_file)
+             except Exception as e:
+                 st.error(f"Error al cargar la imagen de origen: {str(e)}")
+                 source_image = None
+         else:
+             source_image = Image.open("face.jpg")

+ source_face_index