AlekseyCalvin committed (verified)
Commit 98336c9 · 1 parent: af16d31

Create app.py

Files changed (1): app.py +206 -0
app.py ADDED
@@ -0,0 +1,206 @@
import os
import gradio as gr
import json
import logging
import torch
from PIL import Image
from os import path
import spaces
from diffusers import DiffusionPipeline, AutoencoderTiny
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
from diffusers.models.transformers import FluxTransformer2DModel
import copy
import random
import time
import safetensors.torch
from safetensors.torch import load_file
from huggingface_hub import HfFileSystem, ModelCard
from huggingface_hub import login, hf_hub_download


# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
    loras = json.load(f)
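
# For reference, a minimal loras.json entry consistent with the fields this
# script reads elsewhere ("title", "image", "repo", "trigger_word", plus the
# optional "weights" and "aspect") might look like the following. The values
# are illustrative placeholders, not part of the original commit:
#
# [
#   {
#     "title": "Historic Color",
#     "image": "https://example.com/preview.png",
#     "repo": "user/some-lora-repo",
#     "trigger_word": "HST style",
#     "weights": "lora.safetensors",
#     "aspect": "portrait"
#   }
# ]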

# Initialize the base model
dtype = torch.float16
base_model = "AlekseyCalvin/SilverAgePoets_FluxS_TestAlpha_Diffusers"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
#pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
torch.cuda.empty_cache()

# Swap the pipeline's CLIP text encoder/tokenizer for a fine-tuned variant
clipmodel = 'norm'
if clipmodel == "long":
    model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
    config = CLIPConfig.from_pretrained(model_id)
    maxtokens = 77
elif clipmodel == "norm":
    model_id = "zer0int/CLIP-GmP-ViT-L-14"
    config = CLIPConfig.from_pretrained(model_id)
    maxtokens = 77
clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)

pipe.tokenizer = clip_processor.tokenizer
pipe.text_encoder = clip_model.text_model
pipe.tokenizer_max_length = maxtokens
pipe.text_encoder.dtype = torch.bfloat16  # the swapped-in encoder was already loaded in bfloat16 above
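
# Hypothetical sanity check (not in the original commit): after swapping in
# the fine-tuned tokenizer and text encoder, confirm prompts are padded and
# truncated to the expected token length before serving the app.
sample = pipe.tokenizer("a test prompt", padding="max_length", max_length=maxtokens, truncation=True, return_tensors="pt")
assert sample.input_ids.shape[-1] == maxtokens, "tokenizer max length mismatch"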

MAX_SEED = 2**32 - 1

class calculateDuration:
    """Context manager that times a block of work and prints the elapsed seconds."""
    def __init__(self, activity_name=""):
        self.activity_name = activity_name

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.end_time = time.time()
        self.elapsed_time = self.end_time - self.start_time
        if self.activity_name:
            print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
        else:
            print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
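
# Usage sketch for the timer above (mirrors how it is used later in this file):
#
#     with calculateDuration("Loading LoRA weights"):
#         ...  # timed work; the elapsed time prints on exit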

def update_selection(evt: gr.SelectData, width, height):
    selected_lora = loras[evt.index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    if "aspect" in selected_lora:
        if selected_lora["aspect"] == "portrait":
            width = 768
            height = 1024
        elif selected_lora["aspect"] == "landscape":
            width = 1024
            height = 768
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        evt.index,
        width,
        height,
    )

@spaces.GPU(duration=50)
def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
    pipe.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(seed)

    with calculateDuration("Generating image"):
        # Generate image
        image = pipe(
            prompt=f"{prompt} {trigger_word}",
            num_inference_steps=steps,
            guidance_scale=cfg_scale,
            width=width,
            height=height,
            generator=generator,
            joint_attention_kwargs={"scale": lora_scale},
        ).images[0]
    return image
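
# Illustrative direct invocation (not part of the original commit); assumes a
# CUDA device is available and a LoRA has already been loaded into the pipeline:
#
#     img = generate_image(
#         prompt="tinted portrait of a poet", trigger_word="HST style",
#         steps=6, seed=42, cfg_scale=1.3, width=768, height=768,
#         lora_scale=0.5, progress=None,
#     )
#     img.save("sample.png")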

def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    if selected_index is None:
        raise gr.Error("You must select a LoRA before proceeding.")

    selected_lora = loras[selected_index]
    lora_path = selected_lora["repo"]
    trigger_word = selected_lora["trigger_word"]

    # Load LoRA weights
    with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
        if "weights" in selected_lora:
            pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
        else:
            pipe.load_lora_weights(lora_path)

    # Set random seed for reproducibility
    with calculateDuration("Randomizing seed"):
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)

    image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
    pipe.to("cpu")
    pipe.unload_lora_weights()
    return image, seed

run_lora.zerogpu = True
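
# Note: run_lora moves the pipeline back to the CPU and unloads the LoRA after
# each generation, so repeated requests don't accumulate adapters in GPU memory.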

css = '''
#gen_btn{height: 100%}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
#gallery .grid-wrap{height: 10vh}
'''
with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
    title = gr.HTML(
        """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> SOONfactory </h1>""",
        elem_id="title",
    )
    # Info blob stating what the app is running
    info_blob = gr.HTML(
        """<div id="info_blob"> Img. Manufactory Running On: Our 'Historic Color SOON®' Schnell/Pixelwave-base Model (at AlekseyCalvin/HistoricColorSoonr_Schnell). Now testing related LoRAs (#s 2-8, 11, 12, 14, 16) for merging. </div>"""
    )

    # Info blob listing the trigger words/phrases to prepend for each LoRA
    info_blob = gr.HTML(
        """<div id="info_blob">Prephrase prompts w/: 1: RCA style || 2-thru-12: HST style analog film photo; HST autochrome photograph || 13: HST style in Peterhof || 14: LEN Vladimir Lenin || 15: SOTS style || 16: crisp photo || 17: TOK hybrid || 18: 2004 photo || 19: TOK portra || 20: flmft Kodachrome || 21: HST Austin Osman Spare style || 22: polaroid photo || 23: pficonics || 24: wh3r3sw4ld0 || 25: retrofuturism || 26: vintage cover || </div>"""
    )
    selected_index = gr.State(None)
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Select a LoRA/style & type a prompt!")
        with gr.Column(scale=1, elem_id="gen_column"):
            generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
    with gr.Row():
        with gr.Column(scale=3):
            selected_info = gr.Markdown("")
            gallery = gr.Gallery(
                [(item["image"], item["title"]) for item in loras],
                label="LoRA Inventory",
                allow_preview=False,
                columns=3,
                elem_id="gallery"
            )

        with gr.Column(scale=4):
            result = gr.Image(label="Generated Image")

    with gr.Row():
        with gr.Accordion("Advanced Settings", open=True):
            with gr.Column():
                with gr.Row():
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=0.5, value=1.3)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=6)

                with gr.Row():
                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=768)
                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=768)

                with gr.Row():
                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
                    lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1.9, step=0.01, value=0.5)

    gallery.select(
        update_selection,
        inputs=[width, height],
        outputs=[prompt, selected_info, selected_index, width, height]
    )

    gr.on(
        triggers=[generate_button.click, prompt.submit],
        fn=run_lora,
        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed]
    )

app.queue(default_concurrency_limit=2).launch(show_error=True)