kayfahaarukku committed on
Commit
2efc5d6
1 Parent(s): f223a90

Generation Details take 2

Browse files
Files changed (1) hide show
  1. app.py +34 -10
app.py CHANGED
@@ -1,5 +1,4 @@
1
  import os
2
- import spaces
3
  import torch
4
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
5
  import gradio as gr
@@ -26,7 +25,7 @@ def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_s
26
  seed = random.randint(0, 99999999)
27
  if use_defaults:
28
  prompt = f"{prompt}, masterpiece, best quality"
29
- negative_prompt = f"lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, {negative_prompt}"
30
  generator = torch.manual_seed(seed)
31
 
32
  def callback(step, timestep, latents):
@@ -48,15 +47,39 @@ def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_s
48
 
49
  torch.cuda.empty_cache()
50
 
51
- return image, seed
52
 
53
  # Define Gradio interface
54
  def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
55
- image, seed = generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress)
56
- return image, seed, gr.update(value=seed)
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
  def reset_inputs():
59
- return gr.update(value=''), gr.update(value='realistic, 3d,'), gr.update(value=True), gr.update(value='832x1216'), gr.update(value=7), gr.update(value=28), gr.update(value=0), gr.update(value=True)
 
 
 
 
 
 
 
 
 
 
 
60
 
61
  with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/miku@1.2.1") as demo:
62
  gr.HTML(
@@ -66,7 +89,7 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/miku@1.2.1") as d
66
  with gr.Row():
67
  with gr.Column():
68
  prompt_input = gr.Textbox(lines=2, placeholder="Enter prompt here", label="Prompt")
69
- negative_prompt_input = gr.Textbox(lines=2, placeholder="Enter negative prompt here", label="Negative Prompt", value="realistic, 3d,")
70
  use_defaults_input = gr.Checkbox(label="Use Default Quality Tags and Negative Prompt", value=True)
71
  resolution_input = gr.Radio(
72
  choices=[
@@ -85,6 +108,7 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/miku@1.2.1") as d
85
 
86
  with gr.Column():
87
  output_image = gr.Image(type="pil", label="Generated Image")
 
88
  gr.Markdown(
89
  """
90
  ### Recommended prompt formatting:
@@ -103,15 +127,15 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/miku@1.2.1") as d
103
  inputs=[
104
  prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
105
  ],
106
- outputs=[output_image, seed_input]
107
  )
108
 
109
  reset_button.click(
110
  reset_inputs,
111
  inputs=[],
112
  outputs=[
113
- prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
114
  ]
115
  )
116
 
117
- demo.queue(max_size=20).launch(share=False)
 
1
  import os
 
2
  import torch
3
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
4
  import gradio as gr
 
25
  seed = random.randint(0, 99999999)
26
  if use_defaults:
27
  prompt = f"{prompt}, masterpiece, best quality"
28
+ negative_prompt = f"nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, {negative_prompt}"
29
  generator = torch.manual_seed(seed)
30
 
31
  def callback(step, timestep, latents):
 
47
 
48
  torch.cuda.empty_cache()
49
 
50
+ return image, seed, prompt, negative_prompt, guidance_scale, num_inference_steps, resolution
51
 
52
  # Define Gradio interface
53
def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    """Gradio event handler: generate an image, then build a details summary.

    Delegates to ``generate_image`` and formats an A1111-style text block
    describing the run.  Returns the image, the seed, an update for the
    seed box, a visibility update for the details panel, and the details
    text itself.
    """
    (image, seed, prompt, negative_prompt,
     guidance_scale, num_inference_steps, resolution) = generate_image(
        prompt, negative_prompt, use_defaults, resolution,
        guidance_scale, num_inference_steps, seed, randomize_seed, progress,
    )

    # One line per setting; joined into the summary shown under the image.
    detail_lines = [
        f"{prompt}",
        f"Negative Prompt: {negative_prompt}",
        f"Steps: {num_inference_steps}",
        "Sampler: Euler a",
        f"CFG scale: {guidance_scale}",
        f"Seed: {seed}",
        f"Size: {resolution}",
    ]
    generation_details = "\n".join(detail_lines)

    return image, seed, gr.update(value=seed), gr.update(visible=True), generation_details
69
 
70
def reset_inputs():
    """Restore every UI control to its default and hide the details panel."""
    # Defaults for: prompt, negative prompt, use-defaults checkbox,
    # resolution, guidance scale, steps, seed, randomize-seed checkbox.
    default_values = ('', '', True, '832x1216', 7, 28, 0, True)
    control_updates = tuple(gr.update(value=v) for v in default_values)
    # Also hide the generation-details panel and clear its text.
    return control_updates + (gr.update(visible=False), "")
83
 
84
  with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/miku@1.2.1") as demo:
85
  gr.HTML(
 
89
  with gr.Row():
90
  with gr.Column():
91
  prompt_input = gr.Textbox(lines=2, placeholder="Enter prompt here", label="Prompt")
92
+ negative_prompt_input = gr.Textbox(lines=2, placeholder="Enter negative prompt here", label="Negative Prompt")
93
  use_defaults_input = gr.Checkbox(label="Use Default Quality Tags and Negative Prompt", value=True)
94
  resolution_input = gr.Radio(
95
  choices=[
 
108
 
109
  with gr.Column():
110
  output_image = gr.Image(type="pil", label="Generated Image")
111
+ generation_details_output = gr.Markdown("", visible=False)
112
  gr.Markdown(
113
  """
114
  ### Recommended prompt formatting:
 
127
  inputs=[
128
  prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
129
  ],
130
+ outputs=[output_image, seed_input, gr.update(value=seed), generation_details_output]
131
  )
132
 
133
  reset_button.click(
134
  reset_inputs,
135
  inputs=[],
136
  outputs=[
137
+ prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input, generation_details_output, gr.update(value="")
138
  ]
139
  )
140
 
141
+ demo.queue(max_size=20).launch(share=False)