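# Gradio demo for Perp-Neg sampling with Stable Diffusion (Hugging Face Space app).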
import gradio as gr
import torch
from torch import autocast
from perpneg_diffusion.perpneg_stable_diffusion.pipeline_perpneg_stable_diffusion import PerpStableDiffusionPipeline
has_cuda = torch.cuda.is_available()
device = torch.device('cuda' if has_cuda else 'cpu')
print(device)
# initialize the Perp-Neg Stable Diffusion pipeline
pipe = PerpStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    # use_auth_token=True
).to(device)
def dummy(images, **kwargs):
    return images, False

# disable the built-in safety checker (it would otherwise blank out flagged images)
pipe.safety_checker = dummy
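# Each example below is [prompts, weights, seed, guidance scale]. Prompts and
# their per-prompt weights are "|"-separated strings; a negative weight marks a
# prompt that Perp-Neg steers the sample away from.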
examples = [
    [
        "an armchair in the shape of an avocado | cushion in the armchair",
        "1 | -0.3",
        "145",
        "7.5"
    ],
    [
        "an armchair in the shape of an avocado",
        "1",
        "145",
        "7.5"
    ],
    [
        "a peacock, back view | a peacock, front view",
        "1 | -3.5",
        "30",
        "7.5"
    ],
    [
        "a peacock, back view",
        "1",
        "30",
        "7.5"
    ],
    [
        "A boy wearing sunglasses | a pair of sunglasses with white frame",
        "1 | -0.35",
        "200",
        "11"
    ],
    [
        "A boy wearing sunglasses",
        "1",
        "200",
        "11"
    ],
    [
        "a photo of an astronaut riding a horse | a jumping horse | a white horse",
        "1 | -0.3 | -0.1",
        "1988",
        "10"
    ],
    [
        "a photo of an astronaut riding a horse | a jumping horse",
        "1 | -0.3",
        "1988",
        "10"
    ],
    [
        "a photo of an astronaut riding a horse",
        "1",
        "1988",
        "10"
    ],
]
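# predict() drives both the "Generate" button and the Examples widget below.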
def predict(prompt, weights, seed, scale=7.5, steps=50):
    """Run Perp-Neg sampling for "|"-separated prompts/weights and return a PIL image."""
    try:
        with torch.no_grad():
            has_cuda = torch.cuda.is_available()
            with autocast('cuda' if has_cuda else 'cpu'):
                if has_cuda:
                    generator = torch.Generator('cuda').manual_seed(int(seed))
                else:
                    generator = torch.Generator().manual_seed(int(seed))
                image_perpneg = pipe(prompt, guidance_scale=float(scale), generator=generator,
                                     num_inference_steps=steps, weights=weights)["images"][0]
                return image_perpneg
    except Exception as e:
        print(e)
        return None
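# Example of calling predict() directly, outside the UI (hypothetical usage;
# arguments taken from the examples list above):
#   image = predict("a peacock, back view | a peacock, front view", "1 | -3.5", "30", "7.5")
#   image.save("peacock.png")  # returns a PIL image, or None on error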
app = gr.Blocks()

with app:
    gr.Markdown(
        "# **<p align='center'>Perp-Neg: Iterative Editing and Robust View Generation</p>**"
    )
    gr.Markdown(
        """
        ### **<p align='center'>Demo created by Huangjie Zheng and Reza Armandpour</p>**
        """
    )
    with gr.Row():
        with gr.Column():
            gr.Markdown(
                '### **Provide a list of prompts and their weights, separated by "|"**'
            )
            prompt = gr.Textbox(label="List of prompts:", show_label=True)
            weights = gr.Textbox(label="List of weights:", show_label=True)
            seed = gr.Textbox(label="Seed:", show_label=True)
            scale = gr.Textbox(label="Guidance scale:", show_label=True)
            image_gen_btn = gr.Button(value="Generate")
        with gr.Column():
            img_output = gr.Image(
                label="Generated Image",
                show_label=True,
            )
gr.Markdown("**Examples:**") | |
gr.Examples( | |
examples, | |
[prompt, weights, seed, scale], | |
[img_output], | |
fn=predict, | |
cache_examples=False, | |
) | |
image_gen_btn.click( | |
predict, | |
inputs=[prompt, weights, seed, scale], | |
outputs=[img_output], | |
) | |
gr.Markdown(""" | |
\n This is the repository for using Perp-Neg sampling with Stable Diffusion model, as presented in [Re-imagine the Negative Prompt Algorithm: Transform 2D Diffusion into 3D, alleviate Janus problem and Beyond.](https://Perp-Neg.github.i). | |
""") | |
gr.Markdown( | |
""" | |
\n Demo created by: Huangjie Zheng and Reza Armandpour</a>. | |
""" | |
) | |
app.launch()