|
import os |
|
import gradio as gr |
|
from random import randint |
|
from operator import itemgetter |
|
import bisect |
|
from all_models2 import tags_plus_models,models,models_plus_tags,find_warm_model_list |
|
from datetime import datetime |
|
from externalmod import gr_Interface_load |
|
import asyncio |
|
import os |
|
from threading import RLock |
|
# Serialises writes to the shared on-disk image file in infer().
lock = RLock()

# Hugging Face token from the environment; falsy values (unset/empty) collapse to None.
HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None

# Module-level placeholder; get_current_time() only shadows it locally.
now2 = 0

# Per-model generation timeout, in seconds.
inference_timeout = 300

# Largest usable RNG seed (32-bit range).
MAX_SEED = 2**32-1

# nb_rep: images generated per distinct model; nb_mod_dif: distinct model slots
# shown in the grid; nb_models: total image slots.
nb_rep=2
nb_mod_dif=20
nb_models=nb_mod_dif*nb_rep

# Per-session caches keyed by session-id string:
#   cache_image      -> every image generated in the session
#   cache_image_actu -> images of the current run only (kept sorted by model name)
cache_image={}
cache_image_actu={}
|
|
|
def split_models(models, nb_models):
    """Split `models` into consecutive chunks of `nb_models` items.

    A trailing remainder is kept only when it holds more than one model
    (a single leftover model is dropped, matching historical behaviour).
    """
    chunks = []
    current = []
    for count, model in enumerate(models, start=1):
        current.append(model)
        if count % nb_models == 0:
            chunks.append(current)
            current = []
    if len(current) > 1:
        chunks.append(current)
    return chunks
|
|
|
def split_models_axb(models, a, b):
    """Repeat each model `a` times in a row and split the stream into chunks of `b`.

    A trailing remainder is kept only when it holds more than one entry.
    """
    chunks = []
    current = []
    total = 0
    for model in models:
        for _ in range(a):
            current.append(model)
            total += 1
            if total % b == 0:
                chunks.append(current)
                current = []
    if len(current) > 1:
        chunks.append(current)
    return chunks
|
|
|
def split_models_8x3(models, nb_models):
    """Split `models` into chunks of 8 and triple each chunk (chunk repeated 3x).

    `nb_models` is accepted for signature compatibility but unused (chunk size
    is fixed at 8). A trailing remainder is kept (tripled) only when it holds
    more than one model.
    """
    chunk_size = 8
    chunks = []
    current = []
    for count, model in enumerate(models, start=1):
        current.append(model)
        if count % chunk_size == 0:
            chunks.append(current * 3)
            current = []
    if len(current) > 1:
        chunks.append(current * 3)
    return chunks
|
|
|
def construct_list_models(tags_plus_models, nb_rep, nb_mod_dif):
    """Build [tag_label, [[chunk_label, chunk], ...]] entries for the UI dropdowns.

    Side effect: each tag's display name in `tags_plus_models` is rewritten
    in place to include its model count, e.g. "anime (123)".
    """
    output = []
    for tag_entry in tags_plus_models:
        chunks = split_models_axb(tag_entry[2], nb_rep, nb_mod_dif)
        labelled = [
            [f"{tag_entry[0]}_{idx + 1}/{len(chunks)} ({len(chunk)}) : {chunk[0]} - {chunk[len(chunk) - 1]}", chunk]
            for idx, chunk in enumerate(chunks)
        ]
        output.append([f"{tag_entry[0]} ({tag_entry[1]})", labelled])
        tag_entry[0] = f"{tag_entry[0]} ({tag_entry[1]})"
    return output
|
|
|
# Nested [tag_label, [[chunk_label, models_chunk], ...]] structure backing the
# tag/model dropdowns; built once at import time.
models_test = []
models_test = construct_list_models(tags_plus_models,nb_rep,nb_mod_dif)
|
|
|
def get_current_time():
    """Return the current local time as ' YYYY-MM-DD HH:MM:SS'.

    The leading space is kept for backward compatibility with the original
    f'{kii} {current_time}' formatting (kii was always '').
    Fixed: removed the useless local `now2` (which only shadowed the module
    global) and the dead `kii` variable.
    """
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return f' {current_time}'
|
|
|
def load_fn_original(models):
    """Legacy loader: populate the global `models_load` registry via gr.load.

    On load failure a no-op text->image Interface is stored instead, so the
    UI keeps a working (blank) slot for that model.
    """
    global models_load
    global num_models
    global default_models
    models_load = {}
    num_models = len(models)
    default_models = models[:num_models] if num_models != 0 else {}
    for model in models:
        if model in models_load:
            continue
        try:
            iface = gr.load(f'models/{model}')
        except Exception as error:
            iface = gr.Interface(lambda txt: None, ['text'], ['image'])
            print(error)
        models_load[model] = iface
|
|
|
def load_fn(models):
    """Populate the global `models_load` registry via gr_Interface_load.

    Logs progress every 50 models; on load failure a no-op text->image
    Interface is stored so the UI keeps a working (blank) slot.
    """
    global models_load
    global num_models
    global default_models
    models_load = {}
    num_models = len(models)
    default_models = models[:num_models] if num_models != 0 else {}
    for count, model in enumerate(models, start=1):
        if count % 50 == 0:
            print("\n\n\n-------" + str(count) + '/' + str(len(models)) + "-------\n\n\n")
        if model in models_load:
            continue
        try:
            iface = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
        except Exception as error:
            iface = gr.Interface(lambda txt: None, ['text'], ['image'])
            print(error)
        models_load[model] = iface
|
|
|
|
|
# Dead experiment kept by the author (bare string literal, no runtime effect):
"""models = models_test[1]"""

# Eagerly load an interface for every known model at import time
# (network-bound; this is the slow part of startup).
load_fn(models)

# Dead experiment kept by the author (bare string literal, no runtime effect):
"""models = {}
load_fn(models)"""
|
|
|
|
|
def extend_choices(choices):
    """Pad `choices` with 'NA' entries up to the module-wide `nb_models` length."""
    padding = ['NA'] * (nb_models - len(choices))
    return choices + padding
|
|
|
def extend_choices_b(choices):
    """Return the padded choices as hidden Textbox components."""
    padded = extend_choices(choices)
    return [gr.Textbox(name, visible=False) for name in padded]
|
|
|
def update_imgbox(choices):
    """Return one empty Image slot per padded choice; 'NA' slots are hidden."""
    padded = extend_choices(choices)
    return [
        gr.Image(None, label=name, interactive=False,
                 visible=(name != 'NA'), show_share_button=False)
        for name in padded
    ]
|
|
|
def choice_group_a(group_model_choice):
    """Identity hook: single extension point shared by the choice_group_* family."""
    selection = group_model_choice
    return selection
|
|
|
def choice_group_b(group_model_choice):
    """Return Image components for the padded selection ('NA' slots hidden)."""
    padded = extend_choices(choice_group_a(group_model_choice))
    return [
        gr.Image(None, label=name, interactive=False,
                 visible=(name != 'NA'), show_share_button=False)
        for name in padded
    ]
|
|
|
def choice_group_c(group_model_choice):
    """Return one Textbox per padded selection entry."""
    padded = extend_choices(choice_group_a(group_model_choice))
    return [gr.Textbox(name) for name in padded]
|
|
|
def choice_group_d(group_model_choice):
    """Return one label Textbox per distinct model slot (first of each nb_rep group)."""
    padded = extend_choices(choice_group_a(group_model_choice))
    return [
        gr.Textbox(padded[i * nb_rep], visible=(padded[i * nb_rep] != 'NA'), show_label=False)
        for i in range(nb_mod_dif)
    ]
|
def choice_group_e(group_model_choice):
    """Return Column visibility updates, one per distinct model slot."""
    padded = extend_choices(choice_group_a(group_model_choice))
    return [
        gr.Column(visible=(padded[i * nb_rep] != 'NA'))
        for i in range(nb_mod_dif)
    ]
|
|
|
def cutStrg(longStrg, start, end):
    """Return the substring longStrg[start:end].

    Fixed: replaced the manual character-by-character concatenation loop with
    a slice (same result for in-range indices, and no IndexError when `end`
    exceeds the string length).
    """
    return longStrg[start:end]
|
|
|
def aff_models_perso(txt_list_perso, nb_models=nb_models, models=models):
    """Parse double-quoted model names out of free text.

    Scans `txt_list_perso` for "..." spans, keeps those that are known model
    names (membership in `models`), and stops after `nb_models` matches or
    when no further quote pair is found.

    Fixed: the quoted span was extracted twice (two cutStrg calls with the
    same arguments); now extracted once, via a plain slice.
    """
    list_perso = []
    start = txt_list_perso.find('"')
    if start == -1:
        return list_perso
    while True:
        start += 1
        end = txt_list_perso.find('"', start)
        if end == -1:
            break
        name = txt_list_perso[start:end]
        if name in models:
            list_perso.append(name)
        start = txt_list_perso.find('"', end + 1)
        if start == -1 or len(list_perso) >= nb_models:
            break
    return list_perso
|
|
|
def aff_models_perso_b(txt_list_perso):
    """Parse the personal model list text and return the matching Image components."""
    parsed = aff_models_perso(txt_list_perso)
    return choice_group_b(parsed)
|
|
|
def aff_models_perso_c(txt_list_perso):
    """Parse the personal model list text and return the matching Textbox components."""
    parsed = aff_models_perso(txt_list_perso)
    return choice_group_c(parsed)
|
|
|
|
|
def tag_choice(group_tag_choice):
    """Return the model-list dropdown populated for the selected tag."""
    return gr.Dropdown(
        label="List of Models with the chosen Tag",
        show_label=True,
        choices=list(group_tag_choice),
        interactive=True,
        filterable=False,
    )
|
|
|
def test_pass(test):
    """Unlock the tags dropdown when `test` matches the 'p' env var.

    On a wrong password an empty dropdown is returned instead.
    """
    if test == os.getenv('p'):
        print("ok")
        choices = list(models_test)
    else:
        print("nop")
        choices = list([])
    return gr.Dropdown(label="Lists Tags", show_label=True, choices=choices, interactive=True)
|
|
|
def test_pass_aff(test):
    """Show the admin accordion and hide the password row on a correct password."""
    if test == os.getenv('p'):
        return gr.Accordion(open=True, visible=True), gr.Row(visible=False)
    return gr.Accordion(open=True, visible=False), gr.Row()
|
|
|
|
|
|
|
|
|
async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    """Run one generation on `models_load[model_str]` in a worker thread.

    Optional parameters are forwarded only when they carry a meaningful value
    (height/width >= 256, steps >= 1, cfg > 0); a negative seed is replaced by
    a random one. Returns the absolute path of the saved PNG, or None on
    timeout/error. The file write is serialised by the module-wide `lock`
    because every call saves to the same "image.png".

    Fixed: removed the confusing chained assignment
    `cfg = kwargs["guidance_scale"] = cfg` (the re-binding of `cfg` was a
    no-op).
    """
    from pathlib import Path
    kwargs = {}
    if height is not None and height >= 256:
        kwargs["height"] = height
    if width is not None and width >= 256:
        kwargs["width"] = width
    if steps is not None and steps >= 1:
        kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0:
        kwargs["guidance_scale"] = cfg
    kwargs["seed"] = seed if seed >= 0 else randint(1, MAX_SEED - 1)

    task = asyncio.create_task(asyncio.to_thread(
        models_load[model_str].fn,
        prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done():
            task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None
|
|
|
def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
    """Synchronous wrapper around infer(): run it on a fresh event loop.

    Returns the image path from infer(), or None for the 'NA' placeholder
    model or on any error.

    Fixed: the event loop is now created *before* the try block — previously,
    if asyncio.new_event_loop() raised, `loop` was unbound and the `finally`
    clause itself crashed with NameError.
    """
    if model_str == 'NA':
        return None
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
    finally:
        loop.close()
    return result
|
|
|
def gen_fn_original(model_str, prompt):
    """Legacy synchronous generation: append random noise to the prompt and
    call the loaded model directly. Returns None for 'NA', False on error."""
    if model_str == 'NA':
        return None
    noise = str(randint(0, 9999))
    try:
        generated = models_load[model_str](f'{prompt} {noise}')
    except Exception as error:
        print("error : " + model_str)
        print(error)
        generated = False
    return generated
|
|
|
|
|
def add_gallery(image, model_str, gallery):
    """Append (image, model_str) to `gallery` (mutated in place) and return it.

    A None gallery becomes a fresh list; a None image is ignored.
    """
    entries = [] if gallery is None else gallery
    if image is not None:
        entries.append((image, model_str))
    return entries
|
|
|
def reset_gallery(gallery):
    """Return a fresh empty gallery; the argument is only kept for the event signature."""
    return []
|
|
|
def load_gallery(gallery, id):
    """Rebuild the gallery from the session's full-history cache, in insertion order."""
    refreshed = reset_gallery(gallery)
    for image, model in cache_image[f"{id}"]:
        refreshed = add_gallery(image, model, refreshed)
    return refreshed
|
def load_gallery_sorted(gallery, id):
    """Rebuild the gallery from the session's full-history cache, sorted by model name."""
    refreshed = reset_gallery(gallery)
    for image, model in sorted(cache_image[f"{id}"], key=itemgetter(1)):
        refreshed = add_gallery(image, model, refreshed)
    return refreshed
|
def load_gallery_actu(gallery, id):
    """Rebuild the gallery from the session's current-run cache (already sorted on insert)."""
    refreshed = reset_gallery(gallery)
    for image, model in cache_image_actu[f"{id}"]:
        refreshed = add_gallery(image, model, refreshed)
    return refreshed
|
|
|
def add_cache_image(image, model_str, id, cache_image=cache_image):
    """Record a generated image in the session's full-history cache; None images are ignored."""
    if image is None:
        return
    cache_image[f"{id}"].append((image, model_str))
|
def add_cache_image_actu(image, model_str, id, cache_image_actu=cache_image_actu):
    """Insert a generated image into the session's current-run cache, kept
    sorted by model name (bisect.insort with key requires Python 3.10+)."""
    if image is None:
        return
    bisect.insort(cache_image_actu[f"{id}"], (image, model_str), key=itemgetter(1))
|
def reset_cache_image(id, cache_image=cache_image):
    """Empty the session's full-history cache in place."""
    cache_image[f"{id}"].clear()
|
def reset_cache_image_actu(id, cache_image_actu=cache_image_actu):
    """Empty the session's current-run cache in place."""
    cache_image_actu[f"{id}"].clear()
|
def reset_cache_image_all_sessions(cache_image=cache_image, cache_image_actu=cache_image_actu):
    """Empty every session's caches in place (session keys are kept, lists cleared).

    Fixed: iterate .values() directly instead of .items() — the keys were unused.
    """
    for cached in cache_image.values():
        cached.clear()
    for cached in cache_image_actu.values():
        cached.clear()
|
|
|
def set_session(id):
    """Allocate a session on first use.

    An id of 0 means "not yet allocated": a random id is drawn, both per-session
    caches are created, and the id is returned as a hidden Number component.
    Any other id is passed straight through.
    """
    if id != 0:
        return id
    new_id = randint(1, MAX_SEED)
    cache_image[f"{new_id}"] = []
    cache_image_actu[f"{new_id}"] = []
    return gr.Number(visible=False, value=new_id)
|
def print_info_sessions():
    """Log the session count and the number of cached images per session."""
    total = 0
    print("###################################")
    print("number of sessions : " + str(len(cache_image)))
    for key, cached in cache_image.items():
        print("session " + key + " : " + str(len(cached)))
        total += len(cached)
    print("images total = " + str(total))
    print("###################################")
|
|
|
def disp_models(group_model_choice, nb_rep=nb_rep):
    """Return a Textbox listing the selected models: deduplicated, quoted,
    with a blank line every 8/nb_rep entries."""
    unique = []
    for name in group_model_choice:
        if name not in unique:
            unique.append(name)
    text = '\n'
    for pos, name in enumerate(unique, start=1):
        text += "\"" + name + "\",\n"
        # float modulo preserved from the original (8 / nb_rep is a float)
        if pos % (8 / nb_rep) == 0:
            text += "\n"
    return gr.Textbox(label="models", value=text)
|
|
|
def search_models(str_search, tags_plus_models=tags_plus_models):
    """Substring-search all model names (tag 0 = full list); when the query
    exactly matches a tag name, also list every model carrying that tag."""
    output1 = "\n"
    for name in tags_plus_models[0][2]:
        if name.find(str_search) != -1:
            output1 += "\"" + name + "\",\n"
    output2 = ""
    for tag_entry in tags_plus_models:
        if str_search != "" and str_search.lower() == tag_entry[0].lower():
            for name in tag_entry[2]:
                output2 += "\"" + name + "\",\n"
    if output2 != "":
        output = output1 + "\n From tags : \n\n" + output2
    else:
        output = output1
    return gr.Textbox(label="out", value=output)
|
|
|
def search_info(txt_search_info, models_plus_tags=models_plus_tags):
    """Look up the tag list of a model named either bare or inside double quotes.

    Fixed two defects:
    - `outputList = m[1]` aliased the shared tag list inside `models_plus_tags`;
      when the matched model had no tags, the fallback message was appended
      into that shared list, corrupting it. The list is now copied.
    - The fallback message typo "Model Not Find" -> "Model Not Found".
    """
    if txt_search_info.find("\"") != -1:
        start = txt_search_info.find("\"") + 1
        end = txt_search_info.find("\"", start)
        m_name = txt_search_info[start:end]
    else:
        m_name = txt_search_info
    outputList = []
    for m in models_plus_tags:
        if m_name == m[0]:
            outputList = list(m[1])  # copy: never mutate the shared tag list
    if len(outputList) == 0:
        outputList.append("Model Not Found")
    return gr.Textbox(label="out", value=outputList)
|
|
|
def add_in_blacklist(bl, model):
    """Append `model` (quoted, comma-terminated) to the blacklist text."""
    return gr.Textbox(f'{bl}"{model}",\n')
|
def add_in_fav(fav, model):
    """Append `model` (quoted, comma-terminated) to the favourites text."""
    return gr.Textbox(f'{fav}"{model}",\n')
|
def rand_from_all_all_models():
    """Fill every slot with distinct random models drawn from the full list
    (tags_plus_models[0] holds all models); each pick is repeated nb_rep times.

    Falls back to the plain list when fewer than nb_mod_dif models exist.
    """
    all_models = tags_plus_models[0][2]
    if len(all_models) < nb_mod_dif:
        return choice_group_c(all_models)
    result = []
    remaining = list(range(len(all_models)))
    for _ in range(nb_mod_dif):
        # randint(1, n) - 1 preserved so the RNG call sequence is unchanged
        pick = randint(1, len(remaining)) - 1
        for _ in range(nb_rep):
            result.append(gr.Textbox(all_models[remaining[pick]]))
        remaining.pop(pick)
    return result
|
def rand_from_tag_all_models(index):
    """Fill every slot with distinct random models drawn from the tag at `index`;
    each pick is repeated nb_rep times.

    Falls back to that tag's first pre-built chunk when fewer than nb_mod_dif
    models carry the tag.
    """
    tag_models = tags_plus_models[index][2]
    if len(tag_models) < nb_mod_dif:
        return choice_group_c(models_test[index][1][0][1])
    result = []
    remaining = list(range(len(tag_models)))
    for _ in range(nb_mod_dif):
        # randint(1, n) - 1 preserved so the RNG call sequence is unchanged
        pick = randint(1, len(remaining)) - 1
        for _ in range(nb_rep):
            result.append(gr.Textbox(tag_models[remaining[pick]]))
        remaining.pop(pick)
    return result
|
|
|
def find_index_tag(group_tag_choice):
    """Return (as a hidden Number) the index of the tag whose chunk list is
    `group_tag_choice`, defaulting to 0 when not found.

    Fixed off-by-one: the original iterated range(len(models_test) - 1) and
    therefore never examined the last tag.
    """
    for i in range(len(models_test)):
        if models_test[i][1] == group_tag_choice:
            return gr.Number(i)
    return gr.Number(0)
|
|
|
|
|
def fonc_search_warm_models(tag, b_format):
    """List John6666's warm SDXL models (optionally narrowed by `tag`) that are
    already in the local `models` list, one quoted name per line.

    When `b_format` is set each name is repeated nb_rep times, matching the
    format expected by the personal-list loader.
    """
    search_tags = ["stable-diffusion-xl"] if tag == "" else ["stable-diffusion-xl", tag]
    models_temp, models_plus_tags_temp = find_warm_model_list("John6666", search_tags, "", "last_modified", 10000)
    repeat = nb_rep if b_format else 1
    lines = ""
    for m in models_temp:
        if m in models:
            lines += f"\"{m}\",\n" * repeat
    return gr.Textbox(lines)
|
|
|
def ratio_chosen(choice_ratio, width, height):
    """Overwrite the W/H sliders from a ratio preset; the 'None' preset
    ([None, None]) passes the current slider values through unchanged."""
    if choice_ratio == [None, None]:
        return width, height
    new_w, new_h = choice_ratio
    return (
        gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=new_w),
        gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=new_h),
    )
|
|
|
# [label, [width, height]] presets for the ratio dropdown.
# The "None" preset leaves the sliders untouched (see ratio_chosen).
list_ratios=[["None",[None,None]],
    ["4:1 (2048 x 512)",[2048,512]],
    ["12:5 (1536 x 640)",[1536,640]],
    ["~16:9 (1344 x 768)",[1344,768]],
    ["~3:2 (1216 x 832)",[1216,832]],
    ["~4:3 (1152 x 896)",[1152,896]],
    ["1:1 (1024 x 1024)",[1024,1024]],
    ["~3:4 (896 x 1152)",[896,1152]],
    ["~2:3 (832 x 1216)",[832,1216]],
    ["~9:16 (768 x 1344)",[768,1344]],
    ["5:12 (640 x 1536)",[640,1536]],
    ["1:4 (512 x 2048)",[512,2048]]]
|
|
|
def make_me():
    """Build the whole UI inside the current gr.Blocks context and wire all events."""
    # NOTE(review): the source arrived whitespace-mangled; the container nesting
    # below is a best-effort reconstruction — confirm layout against the running app.

    # --- Prompt + generation parameters -------------------------------------
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Group():
                txt_input = gr.Textbox(label='Your prompt:', lines=3)
                with gr.Accordion("Advanced", open=False, visible=True):
                    neg_input = gr.Textbox(label='Negative prompt:', lines=1)
                    with gr.Row():
                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        choice_ratio = gr.Dropdown(label="Ratio Width/Height",
                            info="OverWrite Width and Height (W*H<1024*1024)",
                            show_label=True, choices=list(list_ratios) , interactive = True, value=list_ratios[0][1])
                        # Preset selection overwrites both sliders.
                        choice_ratio.change(ratio_chosen,[choice_ratio,width,height],[width,height])
                    with gr.Row():
                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
        gen_button = gr.Button('Generate images', scale=3)
        stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
        # Enable the stop button as soon as a generation starts.
        gen_button.click(lambda: gr.update(interactive=True), None, stop_button)

    # --- Image grid: nb_mod_dif columns, nb_rep image slots per column ------
    with gr.Row() as block_images:
        # Seed the grid with the first model of the first tag's first chunk.
        choices=[models_test[0][1][0][1][0]]
        output = []
        current_models = []
        block_images_liste = []
        block_images_options_liste = []
        button_rand_from_tag=[]
        button_rand_from_all=[]
        button_rand_from_fav=[]
        button_blacklisted=[]
        button_favorites=[]
        choices_plus = extend_choices(choices)
        for i in range(nb_mod_dif):
            with gr.Column(visible=(choices_plus[i*nb_rep] != 'NA')) as block_Temp :
                block_images_liste.append(block_Temp)
                with gr.Group():
                    with gr.Row():
                        for j in range(nb_rep):
                            output.append(gr.Image(None, label=choices_plus[i*nb_rep+j],interactive=False,
                                visible=(choices_plus[i*nb_rep+j] != 'NA'),show_label=False,show_share_button=False))
                    # Hidden textboxes carry the model name feeding each image slot.
                    for j in range(nb_rep):
                        current_models.append(gr.Textbox(choices_plus[i*nb_rep+j], visible=(j==0),show_label=False))
                # Per-slot option buttons, hidden until "Images Options" is ticked.
                with gr.Row(visible=False) as block_Temp:
                    block_images_options_liste.append(block_Temp)
                    button_rand_from_tag.append(gr.Button("Random\nfrom tag"))
                    button_rand_from_all.append(gr.Button("Random\nfrom all"))
                    button_rand_from_fav.append(gr.Button("Random\nfrom fav"))
                    button_blacklisted.append(gr.Button("put in\nblacklist"))
                    button_favorites.append(gr.Button("put in\nfavorites"))

    # One generation event per (model textbox, image) pair; Stop cancels them all.
    for m, o in zip(current_models, output):
        gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
            inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o])
        stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])

    # --- Password row; hidden once the admin area is unlocked ---------------
    with gr.Row() as blockPass:
        txt_input_p = gr.Textbox(label="Pass", lines=1)
        test_button = gr.Button(' ')

    # --- Admin area, revealed by test_pass_aff() on a correct password ------
    with gr.Accordion( open=True, visible=False) as stuffs:
        with gr.Accordion("Advanced",open=False):
            images_options=gr.Checkbox(False,label="Images Options")
            # Toggle the per-slot option rows on/off.
            images_options.change(lambda x:[gr.Row(visible=x) for b in range(nb_mod_dif)],[images_options],block_images_options_liste)
            blacklist_perso=gr.Textbox(label="Blacklist perso")
            fav_perso=gr.Textbox(label="Fav perso")
            button_rand_from_tag_all_models=gr.Button("Random all models from tag")
            button_rand_from_all_all_models=gr.Button("Random all models from all")
            button_rand_from_fav_all_models=gr.Button("Random all models from fav")

        with gr.Accordion("Warm models",open=False):
            with gr.Row():
                text_warm_models=gr.Textbox("",label="list of warm model")
                with gr.Column():
                    text_tag_warm_models=gr.Textbox(lines=1)
                    bool_format_models=gr.Checkbox(label="Format list",value=False)
                    button_search_warm_models=gr.Button("search warm models")
                    button_search_warm_models.click(fonc_search_warm_models,[text_tag_warm_models,bool_format_models],[text_warm_models])
                    button_load_warm_models = gr.Button('Load')
                    # Load the warm-model list into both the image grid and the model textboxes.
                    button_load_warm_models.click(aff_models_perso_b,text_warm_models,output)
                    button_load_warm_models.click(aff_models_perso_c,text_warm_models,current_models)

        with gr.Accordion("Gallery",open=False):
            with gr.Row():
                # Per-session id; 0 means "not yet allocated" (see set_session).
                id_session=gr.Number(visible=False,value=0)
                gen_button.click(set_session, id_session, id_session)
                # Pre-create cache entries for the component's initial value (0).
                cache_image[f"{id_session.value}"]=[]
                cache_image_actu[f"{id_session.value}"]=[]
                with gr.Column():
                    b11 = gr.Button('Load Galerry Actu')
                    b12 = gr.Button('Load Galerry All')
                    b13 = gr.Button('Load Galerry All (sorted)')
                gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                    interactive=False, show_share_button=True, container=True, format="png",
                    preview=True, object_fit="cover",columns=4,rows=4)
                with gr.Column():
                    b21 = gr.Button('Reset Gallery')
                    b22 = gr.Button('Reset Gallery All')
                    b23 = gr.Button('Reset All Sessions')
                    b24 = gr.Button('print info sessions')
                b11.click(load_gallery_actu,[gallery,id_session],gallery)
                b12.click(load_gallery,[gallery,id_session],gallery)
                b13.click(load_gallery_sorted,[gallery,id_session],gallery)
                b21.click(reset_gallery,[gallery],gallery)
                b22.click(reset_cache_image,[id_session],gallery)
                b23.click(reset_cache_image_all_sessions,[],[])
                b24.click(print_info_sessions,[],[])
                # Every newly generated image is mirrored into both session caches.
                for m, o in zip(current_models, output):
                    o.change(add_cache_image,[o,m,id_session],[])
                    o.change(add_cache_image_actu,[o,m,id_session],[])
                # A new run clears the current-run cache and refreshes b12's counter label.
                gen_button.click(reset_cache_image_actu, [id_session], [])
                gen_button.click(lambda id:gr.Button('Load Galerry All ('+str(len(cache_image[f"{id}"]))+")"), [id_session], [b12])

        # --- Tag / model selection ------------------------------------------
        with gr.Group():
            with gr.Row():
                group_tag_choice = gr.Dropdown(label="Lists Tags", show_label=True, choices=list(models_test), interactive = True,value=models_test[0][1])
                index_tag=gr.Number(0,visible=False)
            with gr.Row():
                group_model_choice = gr.Dropdown(label="List of Models with the chosen Tag", show_label=True, choices=list([]), interactive = True)
                # Selecting a model chunk rewires images, model textboxes and column visibility.
                group_model_choice.change(choice_group_b,group_model_choice,output)
                group_model_choice.change(choice_group_c,group_model_choice,current_models)
                group_model_choice.change(choice_group_e,group_model_choice,block_images_liste)
                group_tag_choice.change(tag_choice,group_tag_choice,group_model_choice)
                group_tag_choice.change(find_index_tag,group_tag_choice,index_tag)

        with gr.Accordion("Display/Load Models") :
            with gr.Row():
                txt_list_models=gr.Textbox(label="Models Actu",value="")
                group_model_choice.change(disp_models,group_model_choice,txt_list_models)
                with gr.Column():
                    txt_list_perso = gr.Textbox(label='List Models Perso to Load')
                    button_list_perso = gr.Button('Load')
                    button_list_perso.click(aff_models_perso_b,txt_list_perso,output)
                    button_list_perso.click(aff_models_perso_c,txt_list_perso,current_models)
            with gr.Row():
                txt_search = gr.Textbox(label='Search in')
                txt_output_search = gr.Textbox(label='Search out')
                button_search = gr.Button('Research')
                button_search.click(search_models,txt_search,txt_output_search)
            with gr.Row():
                txt_search_info = gr.Textbox(label='Search info in')
                txt_output_search_info = gr.Textbox(label='Search info out')
                button_search_info = gr.Button('Research info')
                button_search_info.click(search_info,txt_search_info,txt_output_search_info)

    # Correct password -> show `stuffs`, hide the password row.
    with gr.Row():
        test_button.click(test_pass_aff,txt_input_p,[stuffs,blockPass])

    # Global randomisation + per-slot blacklist/favourite wiring.
    button_rand_from_all_all_models.click(rand_from_all_all_models,[],current_models)
    button_rand_from_tag_all_models.click(rand_from_tag_all_models,index_tag,current_models)
    for i in range(nb_mod_dif):
        button_blacklisted[i].click(add_in_blacklist,[blacklist_perso,current_models[i*nb_rep]],blacklist_perso)
        button_favorites[i].click(add_in_fav,[fav_perso,current_models[i*nb_rep]],fav_perso)

    gr.HTML("""
        <div class="footer">
        <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier!
        </p>
    """)
|
|
|
# Injected into the page through gr.Markdown below; currently just a console marker.
js_code = """
console.log('ghgh');
"""

with gr.Blocks(theme="Nymbo/Nymbo_Theme", fill_width=True, css="div.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}") as demo:
    # NOTE(review): Gradio sanitises Markdown, so this <script> tag is unlikely
    # to execute in the browser — confirm whether it is still needed.
    gr.Markdown("<script>" + js_code + "</script>")
    make_me()

# Large queue/thread limits to allow many concurrent generations.
demo.queue(default_concurrency_limit=200, max_size=200)
demo.launch(max_threads=400)