ginipick committed on
Commit f5f2258 · verified · 1 Parent(s): cc819c9

Update app.py

Files changed (1)
  1. app.py +1 -1039
app.py CHANGED
@@ -1,1040 +1,2 @@
1
  import os
2
- huggingface_token = os.getenv("HF_TOKEN")
3
- if not huggingface_token:
4
- print("Warning: Hugging Face token is not set.")
5
-
6
- import gradio as gr
7
- import json
8
- import logging
9
- import torch
10
- from PIL import Image
11
- import spaces
12
- from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
13
- from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
14
- from diffusers.utils import load_image
15
- from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
16
- import copy
17
- import random
18
- import time
19
- import requests
20
- import pandas as pd
21
- from transformers import pipeline
22
- from gradio_imageslider import ImageSlider
23
- import numpy as np
24
- import warnings
25
-
26
- try:
27
- translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu", token=huggingface_token)
28
- except Exception as e:
29
- print(f"Translation model load failed: {str(e)}")
30
- # If the translation model fails to load, return the original text
31
- def translator(text, max_length=512):
32
- return [{'translation_text': text}]
33
-
34
- # Load prompts for randomization
35
- df = pd.read_csv('prompts.csv', header=None)
36
- prompt_values = df.values.flatten()
37
-
38
- # Load LoRAs from JSON file
39
- with open('loras.json', 'r') as f:
40
- loras = json.load(f)
41
-
42
- # Load base FLUX model
43
- dtype = torch.bfloat16
44
- device = "cuda" if torch.cuda.is_available() else "cpu"
45
-
46
- base_model = "black-forest-labs/FLUX.1-dev"
47
- pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
48
-
49
- # Settings for LoRA
50
- taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
51
- good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
52
-
53
- # Set up image-to-image pipeline
54
- pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
55
- base_model,
56
- vae=good_vae,
57
- transformer=pipe.transformer,
58
- text_encoder=pipe.text_encoder,
59
- tokenizer=pipe.tokenizer,
60
- text_encoder_2=pipe.text_encoder_2,
61
- tokenizer_2=pipe.tokenizer_2,
62
- torch_dtype=dtype
63
- ).to(device)
64
-
65
- MAX_SEED = 2**32 - 1
66
- MAX_PIXEL_BUDGET = 1024 * 1024
67
-
68
- pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
69
-
70
- class calculateDuration:
71
- def __init__(self, activity_name=""):
72
- self.activity_name = activity_name
73
-
74
- def __enter__(self):
75
- self.start_time = time.time()
76
- return self
77
-
78
- def __exit__(self, exc_type, exc_value, traceback):
79
- self.end_time = time.time()
80
- self.elapsed_time = self.end_time - self.start_time
81
- if self.activity_name:
82
- print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
83
- else:
84
- print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
85
-
86
- def download_file(url, directory=None):
87
- if directory is None:
88
- directory = os.getcwd() # Use current working directory if not specified
89
-
90
- # Get the filename from the URL
91
- filename = url.split('/')[-1]
92
-
93
- # Full path for the downloaded file
94
- filepath = os.path.join(directory, filename)
95
-
96
- # Download the file
97
- response = requests.get(url)
98
- response.raise_for_status() # Raise an exception for bad status codes
99
-
100
- # Write the content to the file
101
- with open(filepath, 'wb') as file:
102
- file.write(response.content)
103
-
104
- return filepath
105
-
106
- def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
107
- selected_index = evt.index
108
- selected_indices = selected_indices or []
109
- if selected_index in selected_indices:
110
- selected_indices.remove(selected_index)
111
- else:
112
- if len(selected_indices) < 3:
113
- selected_indices.append(selected_index)
114
- else:
115
- gr.Warning("You can select up to 3 LoRAs, remove one to select a new one.")
116
- return gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), width, height, gr.update(), gr.update(), gr.update()
117
-
118
- selected_info_1 = "Select LoRA 1"
119
- selected_info_2 = "Select LoRA 2"
120
- selected_info_3 = "Select LoRA 3"
121
-
122
- lora_scale_1 = 1.15
123
- lora_scale_2 = 1.15
124
- lora_scale_3 = 1.15
125
- lora_image_1 = None
126
- lora_image_2 = None
127
- lora_image_3 = None
128
-
129
- if len(selected_indices) >= 1:
130
- lora1 = loras_state[selected_indices[0]]
131
- selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
132
- lora_image_1 = lora1['image']
133
- if len(selected_indices) >= 2:
134
- lora2 = loras_state[selected_indices[1]]
135
- selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
136
- lora_image_2 = lora2['image']
137
- if len(selected_indices) >= 3:
138
- lora3 = loras_state[selected_indices[2]]
139
- selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}](https://huggingface.co/{lora3['repo']}) ✨"
140
- lora_image_3 = lora3['image']
141
-
142
- if selected_indices:
143
- last_selected_lora = loras_state[selected_indices[-1]]
144
- new_placeholder = f"Type a prompt for {last_selected_lora['title']}"
145
- else:
146
- new_placeholder = "Type a prompt after selecting a LoRA"
147
-
148
- return gr.update(placeholder=new_placeholder), selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, width, height, lora_image_1, lora_image_2, lora_image_3
149
-
150
- def remove_lora(selected_indices, loras_state, index_to_remove):
151
- if len(selected_indices) > index_to_remove:
152
- selected_indices.pop(index_to_remove)
153
-
154
- selected_info_1 = "Select LoRA 1"
155
- selected_info_2 = "Select LoRA 2"
156
- selected_info_3 = "Select LoRA 3"
157
- lora_scale_1 = 1.15
158
- lora_scale_2 = 1.15
159
- lora_scale_3 = 1.15
160
- lora_image_1 = None
161
- lora_image_2 = None
162
- lora_image_3 = None
163
-
164
- for i, idx in enumerate(selected_indices):
165
- lora = loras_state[idx]
166
- if i == 0:
167
- selected_info_1 = f"### LoRA 1 Selected: [{lora['title']}]({lora['repo']}) ✨"
168
- lora_image_1 = lora['image']
169
- elif i == 1:
170
- selected_info_2 = f"### LoRA 2 Selected: [{lora['title']}]({lora['repo']}) ✨"
171
- lora_image_2 = lora['image']
172
- elif i == 2:
173
- selected_info_3 = f"### LoRA 3 Selected: [{lora['title']}]({lora['repo']}) ✨"
174
- lora_image_3 = lora['image']
175
-
176
- return selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3
177
-
178
- def remove_lora_1(selected_indices, loras_state):
179
- return remove_lora(selected_indices, loras_state, 0)
180
-
181
- def remove_lora_2(selected_indices, loras_state):
182
- return remove_lora(selected_indices, loras_state, 1)
183
-
184
- def remove_lora_3(selected_indices, loras_state):
185
- return remove_lora(selected_indices, loras_state, 2)
186
-
187
- def randomize_loras(selected_indices, loras_state):
188
- try:
189
- if len(loras_state) < 3:
190
- raise gr.Error("Not enough LoRAs to randomize.")
191
- selected_indices = random.sample(range(len(loras_state)), 3)
192
- lora1 = loras_state[selected_indices[0]]
193
- lora2 = loras_state[selected_indices[1]]
194
- lora3 = loras_state[selected_indices[2]]
195
- selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
196
- selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
197
- selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}](https://huggingface.co/{lora3['repo']}) ✨"
198
- lora_scale_1 = 1.15
199
- lora_scale_2 = 1.15
200
- lora_scale_3 = 1.15
201
- lora_image_1 = lora1.get('image', 'path/to/default/image.png')
202
- lora_image_2 = lora2.get('image', 'path/to/default/image.png')
203
- lora_image_3 = lora3.get('image', 'path/to/default/image.png')
204
- random_prompt = random.choice(prompt_values)
205
- return selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3, random_prompt
206
- except Exception as e:
207
- print(f"Error in randomize_loras: {str(e)}")
208
- return "Error", "Error", "Error", [], 1.15, 1.15, 1.15, 'path/to/default/image.png', 'path/to/default/image.png', 'path/to/default/image.png', ""
209
-
210
- def add_custom_lora(custom_lora, selected_indices, current_loras):
211
- if custom_lora:
212
- try:
213
- title, repo, path, trigger_word, image = check_custom_model(custom_lora)
214
- print(f"Loaded custom LoRA: {repo}")
215
- existing_item_index = next((index for (index, item) in enumerate(current_loras) if item['repo'] == repo), None)
216
- if existing_item_index is None:
217
- if repo.endswith(".safetensors") and repo.startswith("http"):
218
- repo = download_file(repo)
219
- new_item = {
220
- "image": image if image else "/home/user/app/custom.png",
221
- "title": title,
222
- "repo": repo,
223
- "weights": path,
224
- "trigger_word": trigger_word
225
- }
226
- print(f"New LoRA: {new_item}")
227
- existing_item_index = len(current_loras)
228
- current_loras.append(new_item)
229
-
230
- # Update gallery
231
- gallery_items = [(item["image"], item["title"]) for item in current_loras]
232
- # Update selected_indices if there's room
233
- if len(selected_indices) < 3:
234
- selected_indices.append(existing_item_index)
235
- else:
236
- gr.Warning("You can select up to 3 LoRAs, remove one to select a new one.")
237
-
238
- # Update selected_info and images
239
- selected_info_1 = "Select a LoRA 1"
240
- selected_info_2 = "Select a LoRA 2"
241
- selected_info_3 = "Select a LoRA 3"
242
- lora_scale_1 = 1.15
243
- lora_scale_2 = 1.15
244
- lora_scale_3 = 1.15
245
- lora_image_1 = None
246
- lora_image_2 = None
247
- lora_image_3 = None
248
- if len(selected_indices) >= 1:
249
- lora1 = current_loras[selected_indices[0]]
250
- selected_info_1 = f"### LoRA 1 Selected: {lora1['title']} ✨"
251
- lora_image_1 = lora1['image'] if lora1['image'] else None
252
- if len(selected_indices) >= 2:
253
- lora2 = current_loras[selected_indices[1]]
254
- selected_info_2 = f"### LoRA 2 Selected: {lora2['title']} ✨"
255
- lora_image_2 = lora2['image'] if lora2['image'] else None
256
- if len(selected_indices) >= 3:
257
- lora3 = current_loras[selected_indices[2]]
258
- selected_info_3 = f"### LoRA 3 Selected: {lora3['title']} ✨"
259
- lora_image_3 = lora3['image'] if lora3['image'] else None
260
- print("Finished adding custom LoRA")
261
- return (
262
- current_loras,
263
- gr.update(value=gallery_items),
264
- selected_info_1,
265
- selected_info_2,
266
- selected_info_3,
267
- selected_indices,
268
- lora_scale_1,
269
- lora_scale_2,
270
- lora_scale_3,
271
- lora_image_1,
272
- lora_image_2,
273
- lora_image_3,
274
- gr.update(visible=True) # Make "Remove Custom LoRA" button visible
275
- )
276
- except Exception as e:
277
- print(e)
278
- gr.Warning(str(e))
279
- return current_loras, gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
280
- else:
281
- return current_loras, gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
282
-
283
- def remove_custom_lora(selected_indices, current_loras):
284
- if current_loras:
285
- custom_lora_repo = current_loras[-1]['repo']
286
- # Remove from loras list
287
- current_loras = current_loras[:-1]
288
- # Remove from selected_indices if selected
289
- custom_lora_index = len(current_loras)
290
- if custom_lora_index in selected_indices:
291
- selected_indices.remove(custom_lora_index)
292
- # Update gallery
293
- gallery_items = [(item["image"], item["title"]) for item in current_loras]
294
- # Update selected_info and images
295
- selected_info_1 = "Select a LoRA 1"
296
- selected_info_2 = "Select a LoRA 2"
297
- selected_info_3 = "Select a LoRA 3"
298
- lora_scale_1 = 1.15
299
- lora_scale_2 = 1.15
300
- lora_scale_3 = 1.15
301
- lora_image_1 = None
302
- lora_image_2 = None
303
- lora_image_3 = None
304
- if len(selected_indices) >= 1:
305
- lora1 = current_loras[selected_indices[0]]
306
- selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
307
- lora_image_1 = lora1['image']
308
- if len(selected_indices) >= 2:
309
- lora2 = current_loras[selected_indices[1]]
310
- selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
311
- lora_image_2 = lora2['image']
312
- if len(selected_indices) >= 3:
313
- lora3 = current_loras[selected_indices[2]]
314
- selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}]({lora3['repo']}) ✨"
315
- lora_image_3 = lora3['image']
316
- # If no custom LoRA remains, hide the "Remove Custom LoRA" button
317
- remove_button_visibility = gr.update(visible=False) if not any("custom" in lora['repo'] for lora in current_loras) else gr.update(visible=True)
318
- return (
319
- current_loras,
320
- gr.update(value=gallery_items),
321
- selected_info_1,
322
- selected_info_2,
323
- selected_info_3,
324
- selected_indices,
325
- lora_scale_1,
326
- lora_scale_2,
327
- lora_scale_3,
328
- lora_image_1,
329
- lora_image_2,
330
- lora_image_3,
331
- remove_button_visibility
332
- )
333
-
334
- @spaces.GPU(duration=75)
335
- def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
336
- print("Generating image...")
337
- pipe.to("cuda")
338
- generator = torch.Generator(device="cuda").manual_seed(seed)
339
- with calculateDuration("Generating image"):
340
- # Generate image iteratively
341
- for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
342
- prompt=prompt_mash,
343
- num_inference_steps=steps,
344
- guidance_scale=cfg_scale,
345
- width=width,
346
- height=height,
347
- generator=generator,
348
- joint_attention_kwargs={"scale": 1.0},
349
- output_type="pil",
350
- good_vae=good_vae,
351
- ):
352
- yield img
353
-
354
- @spaces.GPU(duration=75)
355
- def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
356
- pipe_i2i.to("cuda")
357
- generator = torch.Generator(device="cuda").manual_seed(seed)
358
- image_input = load_image(image_input_path)
359
- final_image = pipe_i2i(
360
- prompt=prompt_mash,
361
- image=image_input,
362
- strength=image_strength,
363
- num_inference_steps=steps,
364
- guidance_scale=cfg_scale,
365
- width=width,
366
- height=height,
367
- generator=generator,
368
- joint_attention_kwargs={"scale": 1.0},
369
- output_type="pil",
370
- ).images[0]
371
- return final_image
372
-
373
- def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
374
- try:
375
- # Detect and translate Korean text if present
376
- if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
377
- try:
378
- translated = translator(prompt, max_length=512)[0]['translation_text']
379
- print(f"Original prompt: {prompt}")
380
- print(f"Translated prompt: {translated}")
381
- prompt = translated
382
- except Exception as e:
383
- print(f"Translation failed: {str(e)}")
384
- # Use the original prompt if translation fails
385
-
386
- if not selected_indices:
387
- raise gr.Error("You must select at least one LoRA before proceeding.")
388
-
389
- selected_loras = [loras_state[idx] for idx in selected_indices]
390
-
391
- # Build the prompt with trigger words
392
- prepends = []
393
- appends = []
394
- for lora in selected_loras:
395
- trigger_word = lora.get('trigger_word', '')
396
- if trigger_word:
397
- if lora.get("trigger_position") == "prepend":
398
- prepends.append(trigger_word)
399
- else:
400
- appends.append(trigger_word)
401
- prompt_mash = " ".join(prepends + [prompt] + appends)
402
- print("Prompt Mash: ", prompt_mash)
403
-
404
- # Unload previous LoRA weights
405
- with calculateDuration("Unloading LoRA"):
406
- pipe.unload_lora_weights()
407
- pipe_i2i.unload_lora_weights()
408
-
409
- print(f"Active adapters before loading: {pipe.get_active_adapters()}")
410
-
411
- # Load LoRA weights with respective scales
412
- lora_names = []
413
- lora_weights = []
414
- with calculateDuration("Loading LoRA weights"):
415
- for idx, lora in enumerate(selected_loras):
416
- try:
417
- lora_name = f"lora_{idx}"
418
- lora_path = lora['repo']
419
- weight_name = lora.get("weights")
420
- print(f"Loading LoRA {lora_name} from {lora_path}")
421
- if image_input is not None:
422
- if weight_name:
423
- pipe_i2i.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=lora_name)
424
- else:
425
- pipe_i2i.load_lora_weights(lora_path, adapter_name=lora_name)
426
- else:
427
- if weight_name:
428
- pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=lora_name)
429
- else:
430
- pipe.load_lora_weights(lora_path, adapter_name=lora_name)
431
- lora_names.append(lora_name)
432
- lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2 if idx == 1 else lora_scale_3)
433
- except Exception as e:
434
- print(f"Failed to load LoRA {lora_name}: {str(e)}")
435
-
436
- print("Loaded LoRAs:", lora_names)
437
- print("Adapter weights:", lora_weights)
438
-
439
- if lora_names:
440
- if image_input is not None:
441
- pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
442
- else:
443
- pipe.set_adapters(lora_names, adapter_weights=lora_weights)
444
- else:
445
- print("No LoRAs were successfully loaded.")
446
- return None, seed, gr.update(visible=False)
447
-
448
- print(f"Active adapters after loading: {pipe.get_active_adapters()}")
449
-
450
- # Randomize seed if requested
451
- with calculateDuration("Randomizing seed"):
452
- if randomize_seed:
453
- seed = random.randint(0, MAX_SEED)
454
-
455
- if image_input is not None:
456
- final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
457
- else:
458
- image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
459
- final_image = None
460
- step_counter = 0
461
- for image in image_generator:
462
- step_counter += 1
463
- final_image = image
464
- progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
465
- yield image, seed, gr.update(value=progress_bar, visible=True)
466
-
467
- if final_image is None:
468
- raise Exception("Failed to generate image")
469
-
470
- return final_image, seed, gr.update(visible=False)
471
-
472
- except Exception as e:
473
- print(f"Error in run_lora: {str(e)}")
474
- return None, seed, gr.update(visible=False)
475
-
476
- run_lora.zerogpu = True
477
-
478
- def get_huggingface_safetensors(link):
479
- split_link = link.split("/")
480
- if len(split_link) == 2:
481
- model_card = ModelCard.load(link)
482
- base_model = model_card.data.get("base_model")
483
- print(f"Base model: {base_model}")
484
- if base_model not in ["black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"]:
485
- raise Exception("Not a FLUX LoRA!")
486
- image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
487
- trigger_word = model_card.data.get("instance_prompt", "")
488
- image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
489
- fs = HfFileSystem()
490
- safetensors_name = None
491
- try:
492
- list_of_files = fs.ls(link, detail=False)
493
- for file in list_of_files:
494
- if file.endswith(".safetensors"):
495
- safetensors_name = file.split("/")[-1]
496
- if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
497
- image_elements = file.split("/")
498
- image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
499
- except Exception as e:
500
- print(e)
501
- raise gr.Error("Invalid Hugging Face repository with a *.safetensors LoRA")
502
- if not safetensors_name:
503
- raise gr.Error("No *.safetensors file found in the repository")
504
- return split_link[1], link, safetensors_name, trigger_word, image_url
505
- else:
506
- raise gr.Error("Invalid Hugging Face repository link")
507
-
508
- def check_custom_model(link):
509
- if link.endswith(".safetensors"):
510
- # Treat as direct link to the LoRA weights
511
- title = os.path.basename(link)
512
- repo = link
513
- path = None # No specific weight name
514
- trigger_word = ""
515
- image_url = None
516
- return title, repo, path, trigger_word, image_url
517
- elif link.startswith("https://"):
518
- if "huggingface.co" in link:
519
- link_split = link.split("huggingface.co/")
520
- return get_huggingface_safetensors(link_split[1])
521
- else:
522
- raise Exception("Unsupported URL")
523
- else:
524
- # Assume it's a Hugging Face model path
525
- return get_huggingface_safetensors(link)
526
-
527
- def update_history(new_image, history):
528
- """Updates the history gallery with the new image."""
529
- if history is None:
530
- history = []
531
- if new_image is not None:
532
- history.insert(0, new_image)
533
- return history
534
-
535
- # Custom theme configuration
536
- custom_theme = gr.themes.Base(
537
- primary_hue="blue",
538
- secondary_hue="purple",
539
- neutral_hue="slate",
540
- ).set(
541
- button_primary_background_fill="*primary_500",
542
- button_primary_background_fill_dark="*primary_600",
543
- button_primary_background_fill_hover="*primary_400",
544
- button_primary_border_color="*primary_500",
545
- button_primary_border_color_dark="*primary_600",
546
- button_primary_text_color="white",
547
- button_primary_text_color_dark="white",
548
- button_secondary_background_fill="*neutral_100",
549
- button_secondary_background_fill_dark="*neutral_700",
550
- button_secondary_background_fill_hover="*neutral_50",
551
- button_secondary_text_color="*neutral_800",
552
- button_secondary_text_color_dark="white",
553
- background_fill_primary="*neutral_50",
554
- background_fill_primary_dark="*neutral_900",
555
- block_background_fill="white",
556
- block_background_fill_dark="*neutral_800",
557
- block_label_background_fill="*primary_500",
558
- block_label_background_fill_dark="*primary_600",
559
- block_label_text_color="white",
560
- block_label_text_color_dark="white",
561
- block_title_text_color="*neutral_800",
562
- block_title_text_color_dark="white",
563
- input_background_fill="white",
564
- input_background_fill_dark="*neutral_800",
565
- input_border_color="*neutral_200",
566
- input_border_color_dark="*neutral_700",
567
- input_placeholder_color="*neutral_400",
568
- input_placeholder_color_dark="*neutral_400",
569
- shadow_spread="8px",
570
- shadow_inset="0px 2px 4px 0px rgba(0,0,0,0.05)"
571
- )
572
-
573
- css = '''
574
- /* Basic button and component styles */
575
- #gen_btn {
576
- height: 100%
577
- }
578
-
579
- #title {
580
- text-align: center
581
- }
582
-
583
- #title h1 {
584
- font-size: 3em;
585
- display: inline-flex;
586
- align-items: center
587
- }
588
-
589
- #title img {
590
- width: 100px;
591
- margin-right: 0.25em
592
- }
593
-
594
- #lora_list {
595
- background: var(--block-background-fill);
596
- padding: 0 1em 0.3em;
597
- font-size: 90%
598
- }
599
-
600
- /* Custom LoRA card styles */
601
- .custom_lora_card {
602
- margin-bottom: 1em
603
- }
604
-
605
- .card_internal {
606
- display: flex;
607
- height: 100px;
608
- margin-top: 0.5em
609
- }
610
-
611
- .card_internal img {
612
- margin-right: 1em
613
- }
614
-
615
- /* Utility classes */
616
- .styler {
617
- --form-gap-width: 0px !important
618
- }
619
-
620
- /* Progress bar styles */
621
- #progress {
622
- height: 30px;
623
- width: 90% !important;
624
- margin: 0 auto !important;
625
- }
626
-
627
- #progress .generating {
628
- display: none
629
- }
630
-
631
- .progress-container {
632
- width: 100%;
633
- height: 30px;
634
- background-color: #f0f0f0;
635
- border-radius: 15px;
636
- overflow: hidden;
637
- margin-bottom: 20px
638
- }
639
-
640
- .progress-bar {
641
- height: 100%;
642
- background-color: #4f46e5;
643
- width: calc(var(--current) / var(--total) * 100%);
644
- transition: width 0.5s ease-in-out
645
- }
646
-
647
- /* Component-specific styles */
648
- #component-8, .button_total {
649
- height: 100%;
650
- align-self: stretch;
651
- }
652
-
653
- #loaded_loras [data-testid="block-info"] {
654
- font-size: 80%
655
- }
656
-
657
- #custom_lora_structure {
658
- background: var(--block-background-fill)
659
- }
660
-
661
- #custom_lora_btn {
662
- margin-top: auto;
663
- margin-bottom: 11px
664
- }
665
-
666
- #random_btn {
667
- font-size: 300%
668
- }
669
-
670
- #component-11 {
671
- align-self: stretch;
672
- }
673
-
674
- /* Gallery main styles */
675
- #lora_gallery {
676
- margin: 20px 0;
677
- padding: 10px;
678
- border: 1px solid #ddd;
679
- border-radius: 12px;
680
- background: linear-gradient(to bottom right, #ffffff, #f8f9fa);
681
- width: 100% !important;
682
- height: 800px !important;
683
- box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
684
- display: block !important;
685
- }
686
-
687
- /* Gallery grid styles */
688
- #gallery {
689
- display: grid !important;
690
- grid-template-columns: repeat(10, 1fr) !important;
691
- gap: 10px !important;
692
- padding: 10px !important;
693
- width: 100% !important;
694
- height: 100% !important;
695
- overflow-y: auto !important;
696
- max-width: 100% !important;
697
- }
698
-
699
- /* Gallery item styles */
700
- .gallery-item {
701
- position: relative !important;
702
- width: 100% !important;
703
- aspect-ratio: 1 !important;
704
- margin: 0 !important;
705
- box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
706
- transition: transform 0.3s ease, box-shadow 0.3s ease;
707
- border-radius: 12px;
708
- overflow: hidden;
709
- }
710
-
711
- .gallery-item img {
712
- width: 100% !important;
713
- height: 100% !important;
714
- object-fit: cover !important;
715
- border-radius: 12px !important;
716
- }
717
-
718
- /* Gallery grid wrapper */
719
- .wrap, .svelte-w6dy5e {
720
- display: grid !important;
721
- grid-template-columns: repeat(10, 1fr) !important;
722
- gap: 10px !important;
723
- width: 100% !important;
724
- max-width: 100% !important;
725
- }
726
-
727
- /* Common container styles */
728
- .container, .content, .block, .contain {
729
- width: 100% !important;
730
- max-width: 100% !important;
731
- margin: 0 !important;
732
- padding: 0 !important;
733
- }
734
-
735
- .row {
736
- width: 100% !important;
737
- margin: 0 !important;
738
- padding: 0 !important;
739
- }
740
-
741
- /* Button styles */
742
- .button_total {
743
- box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
744
- transition: all 0.3s ease;
745
- }
746
-
747
- .button_total:hover {
748
- transform: translateY(-2px);
749
- box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
750
- }
751
-
752
- /* Input field styles */
753
- input, textarea {
754
- box-shadow: inset 0 2px 4px 0 rgba(0, 0, 0, 0.06);
755
- transition: all 0.3s ease;
756
- }
757
-
758
- input:focus, textarea:focus {
759
- box-shadow: 0 0 0 3px rgba(66, 153, 225, 0.5);
760
- }
761
-
762
- /* Component border-radius */
763
- .gradio-container .input,
764
- .gradio-container .button,
765
- .gradio-container .block {
766
- border-radius: 12px;
767
- }
768
-
769
- /* Scrollbar styles */
770
- #gallery::-webkit-scrollbar {
771
- width: 8px;
772
- }
773
-
774
- #gallery::-webkit-scrollbar-track {
775
- background: #f1f1f1;
776
- border-radius: 4px;
777
- }
778
-
779
- #gallery::-webkit-scrollbar-thumb {
780
- background: #888;
781
- border-radius: 4px;
782
- }
783
-
784
- #gallery::-webkit-scrollbar-thumb:hover {
785
- background: #555;
786
- }
787
-
788
- /* Flex container */
789
- .flex {
790
- width: 100% !important;
791
- max-width: 100% !important;
792
- display: flex !important;
793
- }
794
-
795
- /* Svelte specific classes */
796
- .svelte-1p9xokt {
797
- width: 100% !important;
798
- max-width: 100% !important;
799
- }
800
-
801
- /* Hide Footer */
802
- #footer {
803
- visibility: hidden;
804
- }
805
-
806
- /* Generated image and container styles */
807
- #result_column, #result_column > div {
808
- display: flex !important;
809
- flex-direction: column !important;
810
- align-items: flex-start !important;
811
- width: 100% !important;
812
- margin: 0 !important;
813
- }
814
-
815
- .generated-image, .generated-image > div {
816
- display: flex !important;
817
- justify-content: flex-start !important;
818
- align-items: flex-start !important;
819
- width: 90% !important;
820
- max-width: 768px !important;
821
- margin: 0 !important;
822
- margin-left: 20px !important;
823
- }
824
-
825
- .generated-image img {
826
- margin: 0 !important;
827
- display: block !important;
828
- max-width: 100% !important;
829
- }
830
-
831
- /* History gallery left alignment */
832
- .history-gallery {
833
- display: flex !important;
834
- justify-content: flex-start !important;
835
- width: 90% !important;
836
- max-width: 90% !important;
837
- margin: 0 !important;
838
- margin-left: 20px !important;
839
- }
840
- '''
841
-
842
- with gr.Blocks(theme=custom_theme, css=css, delete_cache=(60, 3600)) as app:
843
- loras_state = gr.State(loras)
844
- selected_indices = gr.State([])
845
-
846
- gr.Markdown(
847
- """
848
- # GiniGen: Multi-LoRA (Image Training) Integrated Generation Model
849
- ### Instructions:
850
- Select a model from the gallery (up to 3 models) &nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;
851
- Enter your prompt in Korean or English &nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;
852
- Click the **Generate** button
853
- """
854
- )
855
-
856
- with gr.Row(elem_id="lora_gallery", equal_height=True):
857
- gallery = gr.Gallery(
858
- value=[(item["image"], item["title"]) for item in loras],
859
- label="LoRA Explorer Gallery",
860
- columns=11,
861
- elem_id="gallery",
862
- height=800,
863
- object_fit="cover",
864
- show_label=True,
865
- allow_preview=False,
866
- show_share_button=False,
867
- container=True,
868
- preview=False
869
- )
870
-
871
- with gr.Tab(label="Generate"):
872
- # Prompt and Generate Button
873
- with gr.Row():
874
- with gr.Column(scale=3):
875
- prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
876
- with gr.Column(scale=1):
877
- generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])
878
-
879
- # LoRA Selection Area
880
- with gr.Row(elem_id="loaded_loras"):
881
- # Randomize Button
882
- with gr.Column(scale=1, min_width=25):
883
- randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
884
-
885
- # LoRA 1
886
- with gr.Column(scale=8):
887
- with gr.Row():
888
- with gr.Column(scale=0, min_width=50):
889
- lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
890
- with gr.Column(scale=3, min_width=100):
891
- selected_info_1 = gr.Markdown("Select a LoRA 1")
892
- with gr.Column(scale=5, min_width=50):
893
- lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
894
- with gr.Row():
895
- remove_button_1 = gr.Button("Remove", size="sm")
896
-
897
- # LoRA 2
898
- with gr.Column(scale=8):
899
- with gr.Row():
900
- with gr.Column(scale=0, min_width=50):
901
- lora_image_2 = gr.Image(label="LoRA 2 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
902
- with gr.Column(scale=3, min_width=100):
903
- selected_info_2 = gr.Markdown("Select a LoRA 2")
904
- with gr.Column(scale=5, min_width=50):
905
- lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
906
- with gr.Row():
907
- remove_button_2 = gr.Button("Remove", size="sm")
908
-
909
- # LoRA 3
910
- with gr.Column(scale=8):
911
- with gr.Row():
912
- with gr.Column(scale=0, min_width=50):
913
- lora_image_3 = gr.Image(label="LoRA 3 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
914
- with gr.Column(scale=3, min_width=100):
915
- selected_info_3 = gr.Markdown("Select a LoRA 3")
916
- with gr.Column(scale=5, min_width=50):
917
- lora_scale_3 = gr.Slider(label="LoRA 3 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
918
- with gr.Row():
919
- remove_button_3 = gr.Button("Remove", size="sm")
920
-
921
- # Result and Progress Area
922
- with gr.Column(elem_id="result_column"):
923
- progress_bar = gr.Markdown(elem_id="progress", visible=False)
924
- with gr.Column(elem_id="result_box"):
925
- result = gr.Image(
926
- label="Generated Image",
927
- interactive=False,
928
- elem_classes=["generated-image"],
929
- container=True,
930
- elem_id="result_image",
931
- width="100%"
932
- )
933
- with gr.Accordion("History", open=False):
934
- history_gallery = gr.Gallery(
935
- label="History",
936
- columns=6,
937
- object_fit="contain",
938
- interactive=False,
939
- elem_classes=["history-gallery"]
940
- )
941
-
942
- # Advanced Settings
943
- with gr.Row():
944
- with gr.Accordion("Advanced Settings", open=False):
945
- with gr.Row():
946
- input_image = gr.Image(label="Input Image", type="filepath")
947
- image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
948
- with gr.Column():
949
- with gr.Row():
950
- cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
951
- steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
952
- with gr.Row():
953
- width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
954
- height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
955
- with gr.Row():
956
- randomize_seed = gr.Checkbox(True, label="Randomize Seed")
957
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
958
-
959
- # Custom LoRA Section
960
- with gr.Column():
961
- with gr.Group():
962
- with gr.Row(elem_id="custom_lora_structure"):
963
- custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="ginipick/flux-lora-eric-cat", scale=3, min_width=150)
964
- add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
965
- remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
966
- gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
967
-
968
- # Event Handlers
969
- gallery.select(
970
- update_selection,
971
- inputs=[selected_indices, loras_state, width, height],
972
- outputs=[prompt, selected_info_1, selected_info_2, selected_info_3, selected_indices,
973
- lora_scale_1, lora_scale_2, lora_scale_3, width, height,
974
- lora_image_1, lora_image_2, lora_image_3]
975
- )
976
-
977
- remove_button_1.click(
978
- remove_lora_1,
979
- inputs=[selected_indices, loras_state],
980
- outputs=[selected_info_1, selected_info_2, selected_info_3, selected_indices,
981
- lora_scale_1, lora_scale_2, lora_scale_3,
982
- lora_image_1, lora_image_2, lora_image_3]
983
- )
984
-
985
- remove_button_2.click(
986
- remove_lora_2,
987
- inputs=[selected_indices, loras_state],
988
- outputs=[selected_info_1, selected_info_2, selected_info_3, selected_indices,
989
- lora_scale_1, lora_scale_2, lora_scale_3,
990
- lora_image_1, lora_image_2, lora_image_3]
991
- )
992
-
993
- remove_button_3.click(
994
- remove_lora_3,
995
- inputs=[selected_indices, loras_state],
996
- outputs=[selected_info_1, selected_info_2, selected_info_3, selected_indices,
997
- lora_scale_1, lora_scale_2, lora_scale_3,
998
- lora_image_1, lora_image_2, lora_image_3]
999
- )
1000
-
1001
- randomize_button.click(
1002
- randomize_loras,
1003
- inputs=[selected_indices, loras_state],
1004
- outputs=[selected_info_1, selected_info_2, selected_info_3, selected_indices,
1005
- lora_scale_1, lora_scale_2, lora_scale_3,
1006
- lora_image_1, lora_image_2, lora_image_3, prompt]
1007
- )
1008
-
1009
- add_custom_lora_button.click(
1010
- add_custom_lora,
1011
- inputs=[custom_lora, selected_indices, loras_state],
1012
- outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_info_3,
1013
- selected_indices, lora_scale_1, lora_scale_2, lora_scale_3,
1014
- lora_image_1, lora_image_2, lora_image_3, remove_custom_lora_button]
1015
- )
1016
-
1017
- remove_custom_lora_button.click(
1018
- remove_custom_lora,
1019
- inputs=[selected_indices, loras_state],
1020
- outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_info_3,
1021
- selected_indices, lora_scale_1, lora_scale_2, lora_scale_3,
1022
- lora_image_1, lora_image_2, lora_image_3, remove_custom_lora_button]
1023
- )
1024
-
1025
- gr.on(
1026
- triggers=[generate_button.click, prompt.submit],
1027
- fn=run_lora,
1028
- inputs=[prompt, input_image, image_strength, cfg_scale, steps,
1029
- selected_indices, lora_scale_1, lora_scale_2, lora_scale_3,
1030
- randomize_seed, seed, width, height, loras_state],
1031
- outputs=[result, seed, progress_bar]
1032
- ).then(
1033
- fn=lambda x, history: update_history(x, history) if x is not None else history,
1034
- inputs=[result, history_gallery],
1035
- outputs=history_gallery
1036
- )
1037
-
1038
- if __name__ == "__main__":
1039
- app.queue(max_size=20)
1040
- app.launch(debug=True)
 
1
  import os
2
+ exec(os.environ.get('APP'))