Update app.py
app.py CHANGED
@@ -1,80 +1,75 @@
 from pathlib import Path
 from PIL import Image
 import streamlit as st
-import insightface
-from insightface.app import FaceAnalysis
-from huggingface_hub import InferenceClient
 import os, random, numpy as np, yaml, time, logging
 from dataclasses import dataclass, field
 from typing import List
+from huggingface_hub import InferenceClient
+
 st.set_page_config(layout="wide")
+
 try:
-    with open("config.yaml", "r") as file:
+    with open("config.yaml", "r") as file:
+        credentials = yaml.safe_load(file)
 except Exception as e:
     st.error(f"Error al cargar el archivo de configuración: {e}")
     credentials = {"username": "", "password": ""}
+
 @dataclass
 class AppConfig:
     MAX_SEED: int = 1000000
     CLEANUP_DAYS: int = 7
+
 MAX_SEED = AppConfig.MAX_SEED
 client = InferenceClient()
 DATA_PATH = Path("./data")
 DATA_PATH.mkdir(exist_ok=True)
 HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN")
-
-def prepare_face_app():
-    app = FaceAnalysis(name='buffalo_l')
-    app.prepare(ctx_id=0, det_size=(640, 640))
-    swapper = insightface.model_zoo.get_model('onix.onnx')
-    return app, swapper
-def get_upscale_finegrain(prompt, img_path, upscale_factor):
-    try:
-        upscale_client = InferenceClient("fal/AuraSR-v2", hf_token=HF_TOKEN_UPSCALER)
-        result = upscale_client.predict(input_image=open(img_path, "rb").read(), prompt=prompt, upscale_factor=upscale_factor)
-        return result[1] if isinstance(result, list) and len(result) > 1 else None
-    except Exception as e:
-        st.error(f"Error al mejorar la imagen: {e}")
-        return None
+
 def cleanup_old_images(max_age_days=AppConfig.CLEANUP_DAYS):
     current_time = time.time()
     for image_file in DATA_PATH.glob("*.jpg"):
-        if current_time - image_file.stat().st_mtime > max_age_days * 86400:
-
-
-def
-
-
-
-
-
-    source_face = get_face(faces, source_face_index)
-    res_faces = sort_faces(app.get(destination_image))
-    if destination_face_index > len(res_faces) or destination_face_index < 1: raise ValueError("Índice de rostro de destino no válido.")
-    res_face = get_face(res_faces, destination_face_index)
-    result = swapper.get(destination_image, res_face, source_face, paste_back=True)
-    return result
+        if current_time - image_file.stat().st_mtime > max_age_days * 86400:
+            os.remove(image_file)
+
+def authenticate_user(username, password):
+    return username == credentials["username"] and password == credentials["password"]
+
+def list_saved_images():
+    return sorted(DATA_PATH.glob("*.jpg"), key=lambda x: x.stat().st_mtime, reverse=True)
+
 def enhance_prompt(text):
     try:
-        enhanced = client.text_generation(
-
+        enhanced = client.text_generation(
+            "With this text, generate a descriptive and photorealistic txt2img prompt in English in 200 characters maximum: " + text,
+            model="mistralai/Mixtral-8x7B-v0.1",
+            max_length=200
+        )
         return enhanced[:200]
-    except:
+    except:
+        return text[:200]
+
 def save_prompt(image_name, prompt):
-    with open(DATA_PATH / "prompts.txt", "a") as f:
+    with open(DATA_PATH / "prompts.txt", "a") as f:
+        f.write(f"{image_name}: {prompt}\n")
+
 def generate_variations(prompt, num_variants=8, use_enhanced=True):
-    instructions = [
+    instructions = [
+        "Create a photorealistic description for a detailed txt2img prompt in English: ",
         "Write a creative, realistic, and detailed text-to-image prompt in English: ",
         "Generate a descriptive and true to life txt2img prompt in English: ",
         "Describe a photorealistic scene with detailed illumination for a txt2img prompt: ",
         "Give a realistic, elegant txt2img prompt in English, emphasizing photorealism: ",
         "Create a visually dynamic and hyperrealistic txt2img prompt in English: ",
         "Write a cinematic txt2img prompt in English with hyperrealistic elements: ",
-        "Make a lifelike txt2img prompt in English, focusing on photorealistic depth: "
+        "Make a lifelike txt2img prompt in English, focusing on photorealistic depth: "
+    ]
     if use_enhanced:
         prompts = [enhance_prompt(f"{instructions[i % len(instructions)]}{prompt}") for i in range(num_variants)]
-    else:
+    else:
+        prompts = [prompt] * num_variants
     return prompts
+
 def generate_image(prompt, width, height, seed, model_name):
     try:
         with st.spinner("Generando imagen..."):
@@ -85,12 +80,15 @@ def generate_image(prompt, width, height, seed, model_name):
     except Exception as e:
         st.error(f"Error al generar imagen: {e}")
         return None, seed, None
+
 def gen(prompts, width, height, model_name, num_variants=8):
     images = []
     seeds = []
     while len(seeds) < num_variants:
         seed = random.randint(0, MAX_SEED)
-        if seed not in seeds:
+        if seed not in seeds:
+            seeds.append(seed)
+
     for i in range(num_variants):
         current_prompt = prompts[i] if len(prompts) > i else prompts[-1]
         with st.spinner(f"Generando imagen {i+1}/{num_variants}"):
@@ -99,37 +97,37 @@ def gen(prompts, width, height, model_name, num_variants=8):
             image_path = DATA_PATH / f"generated_image_{used_seed}.jpg"
             image.save(image_path)
             save_prompt(f"generated_image_{used_seed}.jpg", enhanced_prompt)
-            images.append(str(image_path))
+            images.append((str(image_path), enhanced_prompt))
             st.success(f"Imagen {i+1} generada")
     return images
+
 def display_gallery():
     st.header("Galería de Imágenes Guardadas")
     images = list_saved_images()
-
-    selected_image = st.radio("Seleccionar imagen para FaceSwap", options=image_options.keys(), index=None)
-    if selected_image:
-        st.session_state['generated_image_path'] = selected_image
-        prompt = get_prompt_for_image(Path(selected_image).name)
-        st.write(f"Prompt: {prompt[:100]}")
-    if selected_image and st.button("Aplicar FaceSwap"):
-        st.success("Imagen seleccionada para FaceSwap")
+
     if images:
         cols = st.columns(4)
         for i, image_file in enumerate(images):
             with cols[i % 4]:
                 st.image(str(image_file), use_column_width=True)
+                prompt = get_prompt_for_image(image_file.name)
+                st.caption(prompt[:100])
                 if st.button(f"Borrar", key=f"delete_{i}_{image_file}"):
                     if image_file.exists():
                         os.remove(image_file)
                         st.success("Imagen borrada")
                         st.rerun()
+
 def get_prompt_for_image(image_name):
     try:
         with open(DATA_PATH / "prompts.txt", "r") as f:
             for line in f:
-                if line.startswith(image_name):
-
+                if line.startswith(image_name):
+                    return line.split(": ", 1)[1].strip()
+    except FileNotFoundError:
+        return "No hay prompt asociado"
     return "No hay prompt asociado"
+
 def login_form():
     st.title("Iniciar Sesión")
     username = st.text_input("Usuario", value="admin")
@@ -138,7 +136,9 @@ def login_form():
         if authenticate_user(username, password):
             st.session_state['authenticated'] = True
             st.success("Autenticación exitosa.")
-        else:
+        else:
+            st.error("Credenciales incorrectas. Intenta de nuevo.")
+
 def upload_image_to_gallery():
     uploaded_image = st.sidebar.file_uploader("Sube una imagen a la galería", type=["jpg", "jpeg", "png"])
     if uploaded_image:
@@ -147,27 +147,37 @@ def upload_image_to_gallery():
         image.save(image_path)
         save_prompt(uploaded_image.name, "uploaded by user")
         st.sidebar.success(f"Imagen subida: {image_path}")
-
+
 def main():
     if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
        login_form()
        return
-
-
+
+    st.title("Flux +Upscale +Prompt Enhancer")
+
     prompt = st.sidebar.text_area("Descripción de la imagen", height=150, max_chars=500)
     format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9", "1:1"])
     model_option = st.sidebar.selectbox("Modelo", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev"])
-    upscale_checkbox = st.sidebar.checkbox("Escalar imagen")
     prompt_enhance = st.sidebar.checkbox("Mejorar Prompt", True)
     num_variants = st.sidebar.slider("Número de imágenes", 1, 8, 8)
+
     width, height = (720, 1280) if format_option == "9:16" else (1280, 720) if format_option == "16:9" else (1280, 1280)
+
     if prompt:
         prompts = generate_variations(prompt, num_variants=num_variants, use_enhanced=prompt_enhance)
-        if st.sidebar.button("Generar Imágenes"):
-
-
-
-
+        if st.sidebar.button("Generar Imágenes"):
+            generated_images = gen(prompts, width, height, model_option, num_variants)
+
+            # Display generated images with their prompts
+            st.header("Imágenes Generadas")
+            cols = st.columns(4)
+            for i, (image_path, image_prompt) in enumerate(generated_images):
+                with cols[i % 4]:
+                    st.image(image_path, use_column_width=True)
+                    st.caption(image_prompt)
+
     upload_image_to_gallery()
     display_gallery()
-
+
+if __name__ == "__main__":
+    main()
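The rewritten app loads credentials from config.yaml with yaml.safe_load, and authenticate_user() compares the login form input against credentials["username"] and credentials["password"]. The config file itself is not part of this diff, so the snippet below is only a minimal sketch of the shape those lookups assume; the key names come from the code above, the values are placeholders.

# Minimal sketch (assumption, not part of the commit): a config.yaml mapping
# with the two keys that authenticate_user() reads.
import yaml

example_config = """
username: admin
password: change-me
"""

credentials = yaml.safe_load(example_config)
print(credentials["username"])   # -> admin
print(credentials["password"])   # -> change-me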