nevreal committed on
Commit
e03ee37
·
verified ·
1 Parent(s): b2e8669

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -31
app.py CHANGED
@@ -1,42 +1,63 @@
1
  import gradio as gr
2
- from diffusers import DiffusionPipeline
 
3
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
 
6
# Cache of ready-to-use pipelines keyed by (base model, LoRA model).
# Loading a diffusion pipeline is very expensive (multi-GB download +
# initialisation), so it must not happen on every button click.
_PIPELINE_CACHE = {}


def generate_image(basem, model, prompt):
    """Generate one image from a text prompt.

    Args:
        basem: Hugging Face repo id of the base diffusion model.
        model: repo id of the LoRA weights applied on top of the base.
        prompt: text prompt describing the desired image.

    Returns:
        The first image produced by the pipeline (a PIL image).
    """
    key = (basem, model)
    pipeline = _PIPELINE_CACHE.get(key)
    if pipeline is None:
        # First request for this base/LoRA pair: load once, reuse afterwards.
        pipeline = DiffusionPipeline.from_pretrained(basem)
        pipeline.load_lora_weights(model)
        _PIPELINE_CACHE[key] = pipeline

    # Run the pipeline on the prompt and return the first generated image.
    return pipeline(prompt).images[0]
16
 
17
# Build the Gradio UI: base/LoRA model pickers, a prompt box, the output
# image, and a button that triggers generation.
with gr.Blocks() as demo:
    gr.Markdown("# Text-to-Image Generation WebUI")

    # Model selection: base checkpoint plus LoRA weights layered on top.
    with gr.Row():
        base_model = gr.Textbox(
            label="Enter your base model here",
            placeholder="John6666/mala-anime-mix-nsfw-pony-xl-v3-sdxl",
        )
        main_model = gr.Textbox(
            label="Enter your main model here",
            placeholder="nevreal/vMurderDrones",
        )

    # Free-form text prompt.
    with gr.Row():
        prompt = gr.Textbox(
            label="Enter your prompt here",
            placeholder="Type your text prompt...",
        )

    # Display area for the generated picture.
    with gr.Column(scale=4):
        output_image = gr.Image(label="Generated Image")

    generate_button = gr.Button("Generate Image")

    # Clicking the button runs the generation function with the three inputs.
    generate_button.click(
        fn=generate_image,
        inputs=[base_model, main_model, prompt],
        outputs=output_image,
    )

# Start the web server.
demo.launch()
 
 
1
  import gradio as gr
2
+ from random import randint
3
+ from all_models import models
4
 
5
def load_fn(models):
    """Populate the global `models_load` registry from a list of model ids.

    Each id is loaded as a hosted inference interface via `gr.load`. If a
    model fails to load, a stub text->image interface is stored instead so
    the UI still renders and the remaining models keep working.
    """
    global models_load
    models_load = {}

    for model in models:
        # Skip duplicates in the incoming list.
        if model in models_load:
            continue
        try:
            m = gr.load(f'models/{model}')
        except Exception as error:
            # Best-effort: keep the app alive, but don't swallow the
            # failure silently — record which model broke and why.
            print(f'Error loading model {model}: {error}')
            m = gr.Interface(lambda txt: None, ['text'], ['image'])
        models_load[model] = m


load_fn(models)
 
 
 
 
 
19
 
 
 
 
20
 
21
# Number of image slots shown in the gallery.
num_models = 6

# Initial selection: the first `num_models` entries of the model list.
default_models = models[:num_models]


def extend_choices(choices):
    """Pad `choices` with 'NA' placeholders up to `num_models` entries."""
    padding = ['NA'] * (num_models - len(choices))
    return choices + padding
27
+
28
+
29
def update_imgbox(choices):
    """Rebuild the image boxes, hiding every slot padded out to 'NA'."""
    padded = extend_choices(choices)
    return [gr.Image(None, label=name, visible=name != 'NA') for name in padded]
32
 
 
 
 
 
 
 
 
 
33
 
34
def gen_fn(model_str, prompt):
    """Run the model named `model_str` on `prompt`; 'NA' slots yield None.

    A random numeric suffix is appended to the prompt so repeated requests
    with the same text are not served an identical cached result.
    """
    if model_str != 'NA':
        noise = str(randint(0, 99999999999))
        return models_load[model_str](f'{prompt} {noise}')
    return None
39
 
 
 
40
 
41
# Main UI: pick one model, enter a prompt, and fan the generation out to up
# to `max_images` image slots.
with gr.Blocks() as demo:
    model_choice2 = gr.Dropdown(models, label = 'Choose model', value = models[0], filterable = False)
    txt_input2 = gr.Textbox(label = 'Prompt text')

    max_images = 6
    num_images = gr.Slider(1, max_images, value = max_images, step = 1, label = 'Number of images')

    gen_button2 = gr.Button('Generate')
    stop_button2 = gr.Button('Stop', variant = 'secondary', interactive = False)
    # BUG FIX: with inputs=None Gradio calls the handler with *no* arguments,
    # so the callback must take zero parameters — the previous
    # `lambda s: ...` raised a TypeError on every click.
    gen_button2.click(lambda: gr.update(interactive = True), None, stop_button2)

    with gr.Row():
        output2 = [gr.Image(label = '') for _ in range(max_images)]

    for i, o in enumerate(output2):
        # Hidden Number carries this slot's index so the handlers below know
        # which image box they drive.
        img_i = gr.Number(i, visible = False)
        # Show/hide this box depending on the slider value.
        num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o)
        # Only generate for slots that are currently visible.
        gen_event2 = gen_button2.click(lambda i, n, m, t: gen_fn(m, t) if (i < n) else None, [img_i, num_images, model_choice2, txt_input2], o)
        # Stop disables itself and cancels this slot's in-flight generation.
        # (Same zero-argument fix as above.)
        stop_button2.click(lambda: gr.update(interactive = False), None, stop_button2, cancels = [gen_event2])


# NOTE(review): `concurrency_count` was removed in Gradio 4.x — if this app
# runs on Gradio 4+, switch to demo.queue(default_concurrency_limit = 36).
demo.queue(concurrency_count = 36)
demo.launch()