openfree committed
Commit 1590686
1 parent: 7c5d0be

Update app.py

Files changed (1): app.py (+261 -210)
app.py CHANGED
@@ -5,8 +5,19 @@ import logging
 import torch
 from PIL import Image
 import spaces
-from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
-from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
+from diffusers import (
+    DiffusionPipeline,
+    AutoencoderTiny,
+    AutoencoderKL,
+    AutoPipelineForImage2Image,
+    FluxControlNetModel,
+    FluxControlNetPipeline,
+)
+from live_preview_helpers import (
+    calculate_shift,
+    retrieve_timesteps,
+    flux_pipe_call_that_returns_an_iterable_of_images,
+)
 from diffusers.utils import load_image
 from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
 import copy
@@ -15,19 +26,13 @@ import time
 import requests
 import pandas as pd
 from transformers import pipeline
-
-import logging
 import warnings
-import numpy as np
-from diffusers import FluxControlNetModel
-from diffusers.pipelines import FluxControlNetPipeline
-from PIL import Image
-from huggingface_hub import snapshot_download
+from gradio_imageslider import ImageSlider
 
 # Load the translation model
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
-#Load prompts for randomization
+# Load prompts for randomization
 df = pd.read_csv('prompts.csv', header=None)
 prompt_values = df.values.flatten()
 
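Note: the Helsinki-NLP model kept as context above translates Korean prompts to English before generation. A transformers translation pipeline returns a list of dicts, so a call presumably looks like this (example input and output are illustrative):

    translator("달리는 고양이")[0]["translation_text"]  # -> e.g. "A running cat"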
@@ -40,14 +45,9 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "black-forest-labs/FLUX.1-dev"
 
-# Load the FLUX model only once
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
-
-# VAE setup
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
-
-# Image2Image pipeline setup
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     base_model,
     vae=good_vae,
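Note: madebyollin/taef1 is a tiny autoencoder for FLUX.1. Wiring it in as the main pipeline's vae while keeping the full-quality good_vae around is presumably the usual live-preview arrangement: the tiny VAE decodes cheap intermediate previews and the full VAE decodes the final image.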
@@ -57,27 +57,32 @@ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     text_encoder_2=pipe.text_encoder_2,
     tokenizer_2=pipe.tokenizer_2,
     torch_dtype=dtype
+)
+
+# Load controlnet model for upscaling
+controlnet = FluxControlNetModel.from_pretrained(
+    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=dtype
+).to(device)
+
+pipe_controlnet = FluxControlNetPipeline(
+    vae=pipe.vae,
+    text_encoder=pipe.text_encoder,
+    tokenizer=pipe.tokenizer,
+    text_encoder_2=pipe.text_encoder_2,
+    tokenizer_2=pipe.tokenizer_2,
+    unet=pipe.unet,
+    controlnet=controlnet,
+    scheduler=pipe.scheduler,
+    safety_checker=pipe.safety_checker,
+    feature_extractor=pipe.feature_extractor,
+    torch_dtype=dtype
 ).to(device)
 
 MAX_SEED = 2**32 - 1
+MAX_PIXEL_BUDGET = 1024 * 1024
 
 pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 
-# ControlNet model and pipeline (loaded only when needed)
-controlnet = None
-pipe_controlnet = None
-
-def load_controlnet():
-    global controlnet, pipe_controlnet
-    if controlnet is None:
-        controlnet = FluxControlNetModel.from_pretrained(
-            "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
-        ).to(device)
-    if pipe_controlnet is None:
-        pipe_controlnet = FluxControlNetPipeline.from_pretrained(
-            base_model, controlnet=controlnet, torch_dtype=torch.bfloat16
-        ).to(device)
-
 class calculateDuration:
     def __init__(self, activity_name=""):
         self.activity_name = activity_name
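Note on the __get__ line kept as context above: plain functions are descriptors in Python, so f.__get__(obj) returns a method bound to obj; that is how the live-preview helper is attached to the pipeline instance. A minimal, self-contained sketch (the class and names are illustrative, not from app.py):

    class Pipeline:
        def __init__(self, name):
            self.name = name

    def describe(self):
        # 'self' is whatever instance the function gets bound to
        return f"pipeline: {self.name}"

    p = Pipeline("flux")
    p.describe = describe.__get__(p)  # bind the free function to this instance
    print(p.describe())               # -> pipeline: flux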
@@ -97,23 +102,23 @@ class calculateDuration:
 def download_file(url, directory=None):
     if directory is None:
         directory = os.getcwd()  # Use current working directory if not specified
-
+
     # Get the filename from the URL
     filename = url.split('/')[-1]
-
+
     # Full path for the downloaded file
     filepath = os.path.join(directory, filename)
-
+
     # Download the file
     response = requests.get(url)
     response.raise_for_status()  # Raise an exception for bad status codes
-
+
     # Write the content to the file
     with open(filepath, 'wb') as file:
         file.write(response.content)
-
+
     return filepath
-
+
 def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
     selected_index = evt.index
     selected_indices = selected_indices or []
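Usage sketch for the download_file helper shown above (the URL is hypothetical):

    lora_path = download_file("https://example.com/some-lora.safetensors")
    # saved into the current working directory; returns the local file path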
@@ -221,7 +226,7 @@ def add_custom_lora(custom_lora, selected_indices, current_loras):
     print(f"New LoRA: {new_item}")
     existing_item_index = len(current_loras)
     current_loras.append(new_item)
-
+
     # Update gallery
     gallery_items = [(item["image"], item["title"]) for item in current_loras]
     # Update selected_indices if there's room
@@ -371,7 +376,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
     with calculateDuration("Unloading LoRA"):
         pipe.unload_lora_weights()
         pipe_i2i.unload_lora_weights()
-
+
     print(pipe.get_active_adapters())
     # Load LoRA weights with respective scales
     lora_names = []
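The unload_lora_weights() and get_active_adapters() calls above come from the diffusers LoRA/PEFT integration. A hedged sketch of the multi-LoRA flow run_lora presumably builds on (repo ids and adapter names are hypothetical; pipe is the pipeline constructed earlier in app.py):

    pipe.unload_lora_weights()  # drop adapters left over from a previous run
    pipe.load_lora_weights("user/lora-one", adapter_name="one")
    pipe.load_lora_weights("user/lora-two", adapter_name="two")
    pipe.set_adapters(["one", "two"], adapter_weights=[1.15, 1.15])  # per-LoRA scales
    print(pipe.get_active_adapters())  # e.g. ['one', 'two']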
@@ -484,43 +489,7 @@ def update_history(new_image, history):
     history.insert(0, new_image)
     return history
 
-css = '''
-#gen_btn{height: 100%}
-#title{text-align: center}
-#title h1{font-size: 3em; display:inline-flex; align-items:center}
-#title img{width: 100px; margin-right: 0.25em}
-#gallery .grid-wrap{height: 5vh}
-#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
-.custom_lora_card{margin-bottom: 1em}
-.card_internal{display: flex;height: 100px;margin-top: .5em}
-.card_internal img{margin-right: 1em}
-.styler{--form-gap-width: 0px !important}
-#progress{height:30px}
-#progress .generating{display:none}
-.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
-.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
-#component-8, .button_total{height: 100%; align-self: stretch;}
-#loaded_loras [data-testid="block-info"]{font-size:80%}
-#custom_lora_structure{background: var(--block-background-fill)}
-#custom_lora_btn{margin-top: auto;margin-bottom: 11px}
-#random_btn{font-size: 300%}
-#component-11{align-self: stretch;}
-footer {visibility: hidden;}
-'''
-
-huggingface_token = os.getenv("HF_TOKEN")
-
-model_path = snapshot_download(
-    repo_id="black-forest-labs/FLUX.1-dev",
-    repo_type="model",
-    ignore_patterns=["*.md", "*..gitattributes"],
-    local_dir="FLUX.1-dev",
-    token=huggingface_token,
-)
-
-MAX_SEED = 1000000
-
-def process_input(input_image, upscale_factor):
+def process_input(input_image, upscale_factor, **kwargs):
     w, h = input_image.size
     w_original, h_original = w, h
     aspect_ratio = w / h
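Aside from the large deletion, the only change in this hunk is the new **kwargs on process_input, which lets callers pass extra keyword arguments without breaking the call; the callers visible in this diff do not use it.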
@@ -549,155 +518,237 @@ def process_input(input_image, upscale_factor):
 
     return input_image.resize((w, h)), w_original, h_original, was_resized
 
-MAX_PIXEL_BUDGET = 1024 * 1024
+@spaces.GPU#(duration=42)
+def infer(
+    seed,
+    randomize_seed,
+    input_image,
+    num_inference_steps,
+    upscale_factor,
+    controlnet_conditioning_scale,
+    progress=gr.Progress(track_tqdm=True),
+):
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    true_input_image = input_image
+    input_image, w_original, h_original, was_resized = process_input(
+        input_image, upscale_factor
+    )
 
-@spaces.GPU
-def upscale(input_image, progress=gr.Progress(track_tqdm=True)):
-    if input_image is None:
-        raise gr.Error("No image to upscale. Please generate an image first.")
-
-    load_controlnet()  # load the ControlNet when it is needed
-
-    # Process the input image
-    input_image, w_original, h_original, was_resized = process_input(input_image, 4)
-
-    # Resize to 4096x4096
-    control_image = input_image.resize((4096, 4096))
-
-    generator = torch.Generator(device=device).manual_seed(random.randint(0, MAX_SEED))
-
-    gr.Info("Upscaling image to 4096x4096...")
-    upscaled_image = pipe_controlnet(
+    # rescale with upscale factor
+    w, h = input_image.size
+    control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
+
+    generator = torch.Generator().manual_seed(seed)
+
+    gr.Info("Upscaling image...")
+    image = pipe_controlnet(
         prompt="",
-        image=control_image,
-        controlnet_conditioning_scale=0.6,
-        num_inference_steps=28,
+        control_image=control_image,
+        controlnet_conditioning_scale=controlnet_conditioning_scale,
+        num_inference_steps=num_inference_steps,
         guidance_scale=3.5,
-        height=4096,
-        width=4096,
+        height=control_image.size[1],
+        width=control_image.size[0],
         generator=generator,
     ).images[0]
 
-    return upscaled_image  # return only the upscaled image
+    if was_resized:
+        gr.Info(
+            f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
+        )
+
+    # resize to target desired size
+    image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
+    image.save("output.jpg")
+    # convert to numpy
+    return [true_input_image, image, seed]
+
+css = '''
+#gen_btn{height: 100%}
+#title{text-align: center}
+#title h1{font-size: 3em; display:inline-flex; align-items:center}
+#title img{width: 100px; margin-right: 0.25em}
+#gallery .grid-wrap{height: 5vh}
+#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
+.custom_lora_card{margin-bottom: 1em}
+.card_internal{display: flex;height: 100px;margin-top: .5em}
+.card_internal img{margin-right: 1em}
+.styler{--form-gap-width: 0px !important}
+#progress{height:30px}
+#progress .generating{display:none}
+.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
+.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
+#component-8, .button_total{height: 100%; align-self: stretch;}
+#loaded_loras [data-testid="block-info"]{font-size:80%}
+#custom_lora_structure{background: var(--block-background-fill)}
+#custom_lora_btn{margin-top: auto;margin-bottom: 11px}
+#random_btn{font-size: 300%}
+#component-11{align-self: stretch;}
+footer {visibility: hidden;}
+'''
 
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as app:
 
     loras_state = gr.State(loras)
     selected_indices = gr.State([])
-    with gr.Row():
-        with gr.Column(scale=3):
-            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
-        with gr.Column(scale=1):
-            generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])
-            upscale_button = gr.Button("Upscale (4096x4096 pixels)", variant="secondary")
-    with gr.Row(elem_id="loaded_loras"):
-        with gr.Column(scale=1, min_width=25):
-            randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
-        with gr.Column(scale=8):
-            with gr.Row():
-                with gr.Column(scale=0, min_width=50):
-                    lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
-                with gr.Column(scale=3, min_width=100):
-                    selected_info_1 = gr.Markdown("Select a LoRA 1")
-                with gr.Column(scale=5, min_width=50):
-                    lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
-                with gr.Row():
-                    remove_button_1 = gr.Button("Remove", size="sm")
-        with gr.Column(scale=8):
-            with gr.Row():
-                with gr.Column(scale=0, min_width=50):
-                    lora_image_2 = gr.Image(label="LoRA 2 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
-                with gr.Column(scale=3, min_width=100):
-                    selected_info_2 = gr.Markdown("Select a LoRA 2")
-                with gr.Column(scale=5, min_width=50):
-                    lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
-                with gr.Row():
-                    remove_button_2 = gr.Button("Remove", size="sm")
-    with gr.Row():
-        with gr.Column():
-            with gr.Group():
-                with gr.Row(elem_id="custom_lora_structure"):
-                    custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="ginipick/flux-lora-eric-cat", scale=3, min_width=150)
-                    add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
-                remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
-            gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
-            gallery = gr.Gallery(
-                [(item["image"], item["title"]) for item in loras],
-                label="Or pick from the LoRA Explorer gallery",
-                allow_preview=False,
-                columns=4,
-                elem_id="gallery"
-            )
-        with gr.Column():
-            progress_bar = gr.Markdown(elem_id="progress", visible=False)
-            result = gr.Image(label="Generated Image", elem_id="result_image")
-            with gr.Accordion("History", open=False):
-                history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
-
-    with gr.Row():
-        with gr.Accordion("Advanced Settings", open=False):
-            with gr.Row():
-                input_image = gr.Image(label="Input image", type="filepath")
-                image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
-            with gr.Column():
+    with gr.Tab("Generate"):
+        with gr.Row():
+            with gr.Column(scale=3):
+                prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
+            with gr.Column(scale=1):
+                generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])
+        with gr.Row(elem_id="loaded_loras"):
+            with gr.Column(scale=1, min_width=25):
+                randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
+            with gr.Column(scale=8):
                 with gr.Row():
-                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
-                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
-
+                    with gr.Column(scale=0, min_width=50):
+                        lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
+                    with gr.Column(scale=3, min_width=100):
+                        selected_info_1 = gr.Markdown("Select a LoRA 1")
+                    with gr.Column(scale=5, min_width=50):
+                        lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
                 with gr.Row():
-                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
-                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
-
+                    remove_button_1 = gr.Button("Remove", size="sm")
+            with gr.Column(scale=8):
                 with gr.Row():
-                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
-                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-
-    gallery.select(
-        update_selection,
-        inputs=[selected_indices, loras_state, width, height],
-        outputs=[prompt, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2])
-    remove_button_1.click(
-        remove_lora_1,
-        inputs=[selected_indices, loras_state],
-        outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-    )
-    remove_button_2.click(
-        remove_lora_2,
-        inputs=[selected_indices, loras_state],
-        outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-    )
-    randomize_button.click(
-        randomize_loras,
-        inputs=[selected_indices, loras_state],
-        outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, prompt]
-    )
-    add_custom_lora_button.click(
-        add_custom_lora,
-        inputs=[custom_lora, selected_indices, loras_state],
-        outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-    )
-    remove_custom_lora_button.click(
-        remove_custom_lora,
-        inputs=[selected_indices, loras_state],
-        outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-    )
+                    with gr.Column(scale=0, min_width=50):
+                        lora_image_2 = gr.Image(label="LoRA 2 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
+                    with gr.Column(scale=3, min_width=100):
+                        selected_info_2 = gr.Markdown("Select a LoRA 2")
+                    with gr.Column(scale=5, min_width=50):
+                        lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
+                with gr.Row():
+                    remove_button_2 = gr.Button("Remove", size="sm")
+        with gr.Row():
+            with gr.Column():
+                with gr.Group():
+                    with gr.Row(elem_id="custom_lora_structure"):
+                        custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="ginipick/flux-lora-eric-cat", scale=3, min_width=150)
+                        add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
+                    remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
+                gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
+                gallery = gr.Gallery(
+                    [(item["image"], item["title"]) for item in loras],
+                    label="Or pick from the LoRA Explorer gallery",
+                    allow_preview=False,
+                    columns=4,
+                    elem_id="gallery"
+                )
+            with gr.Column():
+                progress_bar = gr.Markdown(elem_id="progress", visible=False)
+                result = gr.Image(label="Generated Image", interactive=False)
+                with gr.Accordion("History", open=False):
+                    history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
 
-    gr.on(
-        triggers=[generate_button.click, prompt.submit],
-        fn=run_lora,
-        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
-        outputs=[result, seed, progress_bar]
-    ).then(  # Update the history gallery
-        fn=lambda x, history: update_history(x, history),
-        inputs=[result, history_gallery],
-        outputs=history_gallery,
-    )
+        with gr.Row():
+            with gr.Accordion("Advanced Settings", open=False):
+                with gr.Row():
+                    input_image = gr.Image(label="Input image", type="filepath")
+                    image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
+                with gr.Column():
+                    with gr.Row():
+                        cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
+                        steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
+
+                    with gr.Row():
+                        width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
+                        height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
+
+                    with gr.Row():
+                        randomize_seed = gr.Checkbox(True, label="Randomize seed")
+                        seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+
+        gallery.select(
+            update_selection,
+            inputs=[selected_indices, loras_state, width, height],
+            outputs=[prompt, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2])
+        remove_button_1.click(
+            remove_lora_1,
+            inputs=[selected_indices, loras_state],
+            outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+        )
+        remove_button_2.click(
+            remove_lora_2,
+            inputs=[selected_indices, loras_state],
+            outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+        )
+        randomize_button.click(
+            randomize_loras,
+            inputs=[selected_indices, loras_state],
+            outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, prompt]
+        )
+        add_custom_lora_button.click(
+            add_custom_lora,
+            inputs=[custom_lora, selected_indices, loras_state],
+            outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+        )
+        remove_custom_lora_button.click(
+            remove_custom_lora,
+            inputs=[selected_indices, loras_state],
+            outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
+        )
+        gr.on(
+            triggers=[generate_button.click, prompt.submit],
+            fn=run_lora,
+            inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
+            outputs=[result, seed, progress_bar]
+        ).then(  # Update the history gallery
+            fn=lambda x, history: update_history(x, history),
+            inputs=[result, history_gallery],
+            outputs=history_gallery,
+        )
 
-    upscale_button.click(
-        upscale,
-        inputs=[result],
-        outputs=[result]
-    )
+    with gr.Tab("Upscale"):
+        with gr.Row():
+            input_image_upscale = gr.Image(label="Input Image", type="pil")
+            result_upscale = ImageSlider(label="Input / Output", type="pil", interactive=True)
+        with gr.Row():
+            num_inference_steps_upscale = gr.Slider(
+                label="Number of Inference Steps",
+                minimum=8,
+                maximum=50,
+                step=1,
+                value=28,
+            )
+            upscale_factor = gr.Slider(
+                label="Upscale Factor",
+                minimum=1,
+                maximum=4,
+                step=1,
+                value=4,
+            )
+            controlnet_conditioning_scale = gr.Slider(
+                label="Controlnet Conditioning Scale",
+                minimum=0.1,
+                maximum=1.5,
+                step=0.1,
+                value=0.6,
+            )
+            seed_upscale = gr.Slider(
+                label="Seed",
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                value=42,
+            )
+            randomize_seed_upscale = gr.Checkbox(label="Randomize seed", value=True)
+        with gr.Row():
+            upscale_button = gr.Button("Upscale", variant="primary")
+
+        upscale_button.click(
+            infer,
+            inputs=[
+                seed_upscale,
+                randomize_seed_upscale,
+                input_image_upscale,
+                num_inference_steps_upscale,
+                upscale_factor,
+                controlnet_conditioning_scale,
+            ],
+            outputs=result_upscale,
+        )
 
 app.queue()
-app.launch()
+app.launch()
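For context on the MAX_PIXEL_BUDGET constant introduced above: infer upscales the (possibly shrunken) input by upscale_factor, so process_input has to cap the input such that the output stays within the budget. The middle of process_input is unchanged and therefore elided from the diff; the visible fragments (aspect_ratio, was_resized, the final resize) suggest logic roughly like this hypothetical sketch (not the committed code):

    import math

    MAX_PIXEL_BUDGET = 1024 * 1024  # cap on output pixels

    def clamp_to_budget(w, h, upscale_factor):
        # Output will be (w * f) x (h * f); shrink the input first if that
        # would exceed the budget, preserving the aspect ratio.
        out_pixels = w * h * upscale_factor ** 2
        if out_pixels <= MAX_PIXEL_BUDGET:
            return w, h, False
        scale = math.sqrt(MAX_PIXEL_BUDGET / out_pixels)
        w = max(8, int(w * scale) // 8 * 8)  # keep dimensions multiples of 8
        h = max(8, int(h * scale) // 8 * 8)
        return w, h, True

    # A 1024x1024 input at 4x would be 16 MP, 16x over the 1 MP budget:
    print(clamp_to_budget(1024, 1024, 4))  # -> (256, 256, True)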
 