import gradio as gr
from diffusers import AutoPipelineForText2Image
import spaces
import torch
import random
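
# Gradio demo for the CommonCanvas text-to-image models. Four variants are exposed
# (XLC, XLNC, SC, SNC), each loaded with a community Perturbed-Attention Guidance (PAG)
# custom pipeline; generation runs on the GPU via the @spaces.GPU decorator.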

theme = gr.themes.Base(
    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
)

pipe_xlc = AutoPipelineForText2Image.from_pretrained(
    "temp-org-cc/CommonCanvas-XLC",
    custom_pipeline="multimodalart/sdxl_perturbed_attention_guidance",
    torch_dtype=torch.float16
)
pipe_xlnc = AutoPipelineForText2Image.from_pretrained(
    "temp-org-cc/CommonCanvas-XLNC",
    custom_pipeline="multimodalart/sdxl_perturbed_attention_guidance",
    torch_dtype=torch.float16
)
pipe_sc = AutoPipelineForText2Image.from_pretrained(
    "temp-org-cc/CommonCanvas-SC",
    custom_pipeline="hyoungwoncho/sd_perturbed_attention_guidance",
    torch_dtype=torch.float16
)
pipe_snc = AutoPipelineForText2Image.from_pretrained(
    "temp-org-cc/CommonCanvas-SNC",
    custom_pipeline="hyoungwoncho/sd_perturbed_attention_guidance",
    torch_dtype=torch.float16
)

# Move each pipeline to the GPU; the run_* functions below execute on it via @spaces.GPU.
device = "cuda"
pipe_xlc = pipe_xlc.to(device)
pipe_xlnc = pipe_xlnc.to(device)
pipe_sc = pipe_sc.to(device)
pipe_snc = pipe_snc.to(device)

@spaces.GPU
def run_xlc(prompt, negative_prompt=None, guidance_scale=7.0, pag_scale=3.0, pag_layers=["mid"], randomize_seed=True, seed=42, progress=gr.Progress(track_tqdm=True)):
    if(randomize_seed):
        seed = random.randint(0, 9007199254740991)
    
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image = pipe_xlc(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25).images[0]    
    
    return image, seed

@spaces.GPU
def run_xlnc(prompt, negative_prompt=None, guidance_scale=7.0, pag_scale=3.0, pag_layers=["mid"], randomize_seed=True, seed=42, progress=gr.Progress(track_tqdm=True)):
    if(randomize_seed):
        seed = random.randint(0, 9007199254740991)
    
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image = pipe_xlnc(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25).images[0]    
    
    return image, seed

@spaces.GPU
def run_sc(prompt, negative_prompt=None, guidance_scale=7.0, pag_scale=3.0, pag_layers=["mid"], randomize_seed=True, seed=42, progress=gr.Progress(track_tqdm=True)):
    if(randomize_seed):
        seed = random.randint(0, 9007199254740991)
    
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image = pipe_sc(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25).images[0]    
    
    return image, seed

@spaces.GPU
def run_snc(prompt, negative_prompt=None, guidance_scale=7.0, pag_scale=3.0, pag_layers=["mid"], randomize_seed=True, seed=42, progress=gr.Progress(track_tqdm=True)):
    if(randomize_seed):
        seed = random.randint(0, 9007199254740991)
    
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image = pipe_snc(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25).images[0]
    
    return image, seed

css = '''
.gradio-container{
max-width: 768px !important;
margin: 0 auto;
}
'''
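
# UI: one tab per model variant; all tabs share the output image and the advanced settings.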

with gr.Blocks(css=css, theme=theme) as demo:
    gr.Markdown('''# CommonCanvas
    Demo for the CommonCanvas suite of models, trained on the CommonCatalogue, a dataset of ~70M Creative Commons licensed images.
    ''')
    with gr.Group():
      with gr.Tab("CommonCanvas XLC"):
          with gr.Row():
            prompt_xlc = gr.Textbox(show_label=False, scale=4, placeholder="Your prompt")
            button_xlc = gr.Button("Generate", min_width=120)
      with gr.Tab("CommonCanvas XLNC"):
          with gr.Row():
            prompt_xlnc = gr.Textbox(show_label=False, scale=4, placeholder="Your prompt")
            button_xlnc = gr.Button("Generate", min_width=120)
      with gr.Tab("CommonCanvas SC"):
            prompt_sc = gr.Textbox(show_label=False, scale=4, placeholder="Your prompt")
            button_sc = gr.Button("Generate", min_width=120)
      with gr.Tab("CommonCanvas SNC"):
            prompt_snc = gr.Textbox(show_label=False, scale=4, placeholder="Your prompt")
            button_snc = gr.Button("Generate", min_width=120)  
      output = gr.Image(label="Your result", interactive=False)
      with gr.Accordion("Advanced Settings", open=False):
        guidance_scale = gr.Number(label="CFG guidance scale", info="Classifier-free guidance scale; ignored if no prompt is entered (unconditional generation)", value=7.0)
        negative_prompt = gr.Textbox(label="Negative prompt", info="Only applied to the CFG term; leave blank for unconditional generation")
        pag_scale = gr.Number(label="PAG scale", value=3.0)
        pag_layers = gr.Dropdown(label="Model layers to apply PAG to", info="'mid' is the setting used in the paper; the up and down blocks tend to be unstable", choices=["up", "mid", "down"], multiselect=True, value=["mid"])
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        seed = gr.Slider(label="Seed", minimum=1, maximum=9007199254740991, step=1, randomize=True)
    gr.Examples(fn=run_xlc, examples=[" ", "an insect robot preparing a delicious meal, anime style", "a photo of a group of friends at an amusement park"], inputs=prompt_xlc, outputs=[output, seed], cache_examples=True)
    gr.on(
        triggers=[
            button_xlc.click,
            prompt_xlc.submit
        ],
        fn=run_xlc,
        inputs=[prompt_xlc, negative_prompt, guidance_scale, pag_scale, pag_layers, randomize_seed, seed],
        outputs=[output, seed],
    )
    gr.on(
        triggers=[
            button_xlnc.click,
            prompt_xlnc.submit
        ],
        fn=run_xlnc,
        inputs=[prompt_xlnc, negative_prompt, guidance_scale, pag_scale, pag_layers, randomize_seed, seed],
        outputs=[output, seed],
    )
    gr.on(
        triggers=[
            button_sc.click,
            prompt_sc.submit
        ],
        fn=run_sc,
        inputs=[prompt_sc, negative_prompt, guidance_scale, pag_scale, pag_layers, randomize_seed, seed],
        outputs=[output, seed],
    )
    gr.on(
        triggers=[
            button_snc.click,
            prompt_snc.submit
        ],
        fn=run_snc,
        inputs=[prompt_snc, negative_prompt, guidance_scale, pag_scale, pag_layers, randomize_seed, seed],
        outputs=[output, seed],
    )
if __name__ == "__main__":
    demo.launch(share=True)