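"""Gradio app for a Hugging Face Space (running on ZeroGPU) that explores FLUX.1-dev LoRAs.

The app loads a list of LoRAs from loras.json, lets the user pick one from a gallery,
generates an image with the selected LoRA applied, saves it with PNG metadata under
/tmp/gradio, and logs the query to a Google Sheet via pygsheets.
"""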
import gradio as gr
import json
import logging
import torch
from PIL import Image, PngImagePlugin
import spaces
from diffusers import DiffusionPipeline
from transformers.utils.hub import move_cache
import copy
import random
import os
import pygsheets
import time
from datetime import datetime
# Migrate the Transformers model cache to its current layout
move_cache()
# Initialize the Google Sheets connection
# Authorization
gc = pygsheets.authorize(service_account_env_var='GSHEET_AUTH')
# Open the Google spreadsheet
sh = gc.open('AndroFLUX-Logs')
# Select the second sheet
wks = sh[1]
# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
    loras = json.load(f)
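# Each entry in loras.json is expected to carry the keys read elsewhere in this script:
# "title", "repo", "image", "trigger_word" and, optionally, "weights".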
# Initialize the base model
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
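# The pipeline is created on CPU in bfloat16; it is moved to the GPU only inside the
# @spaces.GPU-decorated function below (the usual ZeroGPU pattern).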
MAX_SEED = 2**32 - 1  # Largest value for a 32-bit unsigned seed
class calculateDuration:
    def __init__(self, activity_name=""):
        self.activity_name = activity_name

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.end_time = time.time()
        self.elapsed_time = self.end_time - self.start_time
        if self.activity_name:
            print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
        else:
            print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
def update_selection(evt: gr.SelectData):
    selected_lora = loras[evt.index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        evt.index
    )
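# Request a ZeroGPU slot for up to 90 seconds for each call to generate_image.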
@spaces.GPU(duration=90)
def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
pipe.to("cuda")
generator = torch.Generator(device="cuda").manual_seed(seed)
with calculateDuration("Generating image"):
# Generate image
image = pipe(
prompt=f"{prompt} {trigger_word}",
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
joint_attention_kwargs={"scale": lora_scale},
max_sequence_length=512
).images[0]
# Save the image to a file with a unique name in /tmp directory
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
image_filename = f"generated_image_{timestamp}.png"
#create temp directory if not exist
newpath = r'/tmp/gradio'
if not os.path.exists(newpath):
os.makedirs(newpath)
image_path = os.path.join("/tmp/gradio", image_filename)
# Add Metadata
new_metadata_string = f"{prompt}\nNegative prompt: none \nSteps: {steps}, CFG scale: {cfg_scale}, Seed: {seed}, Lora hashes: AndroFlux-v19: c44afd41ece1"
metadata = PngImagePlugin.PngInfo()
metadata.add_text("parameters", new_metadata_string)
#Save image in file
image.save(image_path, pnginfo=metadata)
# Construct the URL to access the image
space_url = "https://killwithabass-flux-gay-lora-explorer.hf.space/gradio_api"
image_url = f"{space_url}/file={image_path}"
#Log queries
try:
if "girl" not in prompt and "woman" not in prompt:
wks.append_table(values=[prompt, cfg_scale, steps, seed, width, height, lora_scale,image_url])
except Exception as error:
# handle the exception
print("An exception occurred:", error)
print(f"Image URL: {image_url}") # Log the file URL
return image
def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    if selected_index is None:
        raise gr.Error("You must select a LoRA before proceeding.")
    selected_lora = loras[selected_index]
    lora_path = selected_lora["repo"]
    trigger_word = selected_lora["trigger_word"]
    # Load the LoRA weights
    with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
        if "weights" in selected_lora:
            pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
        else:
            pipe.load_lora_weights(lora_path)
    # Pick a random seed unless the user asked for a fixed one
    with calculateDuration("Randomizing seed"):
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
    image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
    print("Model : " + selected_lora["title"] + " Prompt : " + prompt)
    # Release the GPU copy and unload the LoRA so the next request starts from the clean base model
    pipe.to("cpu")
    pipe.unload_lora_weights()
    return image, seed
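# Assumption: this attribute is read by the Spaces ZeroGPU integration to mark that
# run_lora delegates its GPU work to a @spaces.GPU-decorated function.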
run_lora.zerogpu = True
css = '''
#gen_btn{height: 100%}
#title{text-align: center;}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
'''
with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
    gr.Markdown("# Gay LoRAs Explorer for FLUX 1 DEV")
    selected_index = gr.State(None)
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
        with gr.Column(scale=1, elem_id="gen_column"):
            generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
    with gr.Row():
        with gr.Column(scale=3):
            selected_info = gr.Markdown("")
            with gr.Accordion("LoRA Gallery", open=False):
                gallery = gr.Gallery(
                    [(item["image"], item["title"]) for item in loras],
                    label="LoRAs",
                    allow_preview=False,
                    columns=3
                )
                gr.Markdown("*You can add more models by creating a Pull Request to modify the file loras.json*")
        with gr.Column(scale=4):
            result = gr.Image(label="Generated Image")
    with gr.Row():
        with gr.Accordion("Advanced Settings", open=True):
            with gr.Column():
                with gr.Row():
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
                with gr.Row():
                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=896)
                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1152)
                with gr.Row():
                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
                    lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=1)
    gallery.select(
        update_selection,
        outputs=[prompt, selected_info, selected_index]
    )
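    # Run generation when the Generate button is clicked or the prompt is submitted with Enter.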
    gr.on(
        triggers=[generate_button.click, prompt.submit],
        fn=run_lora,
        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed]
    )
app.queue()
app.launch()