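"""Gradio demo Space: furniture removal via inpainting (models.make_inpainting),
MaskFormer-based semantic segmentation, and two diffusion image upscalers."""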
import os
# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
# os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "caching_allocator"

import base64
import io
import json

import gradio as gr
import numpy as np
import torch
from PIL import Image
from transformers import pipeline
from diffusers import (
    LDMSuperResolutionPipeline,
    StableDiffusionLatentUpscalePipeline,
)

import utils
from models import make_inpainting
def removeFurniture(input_img1,
                    input_img2,
                    positive_prompt,
                    negative_prompt,
                    num_of_images,
                    resolution):
    print("removeFurniture")
    # gr.Number delivers floats; PIL's resize and the pipeline expect ints.
    resolution = int(resolution)
    num_of_images = int(num_of_images)

    input_img1 = input_img1.resize((resolution, resolution))
    input_img2 = input_img2.resize((resolution, resolution))

    # The second input is a drawn-over canvas; derive a binary mask from it.
    canvas_mask = np.array(input_img2)
    mask = utils.get_mask(canvas_mask)
    ret_list = make_inpainting(positive_prompt=positive_prompt,
                               image=input_img1,
                               mask_image=mask,
                               negative_prompt=negative_prompt,
                               num_of_images=num_of_images,
                               resolution=resolution)

    # The UI wires this handler to ten fixed gr.Image outputs,
    # so pad the result list with None up to that count.
    while len(ret_list) < 10:
        ret_list.append(None)
    return ret_list
def imageToString(img):
    # Serialize a PIL image to raw PNG bytes (helper; not wired into the UI).
    output = io.BytesIO()
    img.save(output, format="png")
    return output.getvalue()
def segmentation(img):
    print("segmentation")
    # semantic_segmentation = pipeline("image-segmentation", "nvidia/segformer-b1-finetuned-cityscapes-1024-1024")
    pipe = pipeline("image-segmentation", "facebook/maskformer-swin-large-ade")
    results = pipe(img)
    # PIL masks are not JSON-serializable; ship each one as base64-encoded PNG.
    for p in results:
        p['mask'] = utils.image_to_byte_array(p['mask'])
        p['mask'] = base64.b64encode(p['mask']).decode("utf-8")
    return json.dumps(results)
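
# Illustrative counterpart, not used by the app: a client holding the JSON
# returned above could rebuild the PIL masks roughly like this (a minimal
# sketch; assumes utils.image_to_byte_array produced PNG-encoded bytes).
def decode_masks(results_json):
    results = json.loads(results_json)
    for p in results:
        p['mask'] = Image.open(io.BytesIO(base64.b64decode(p['mask'])))
    return results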
def upscale(image, prompt):
    print("upscale", image, prompt)
    # image.thumbnail((512, 512))
    # pipeline = StableDiffusionUpscalePipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16).to("cuda")
    # Named "upscaler" to avoid shadowing transformers.pipeline imported above.
    upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
    ret = upscaler(prompt=prompt,
                   image=image,
                   num_inference_steps=10,
                   guidance_scale=0)
    return ret.images[0]
def upscale2(image, prompt):
    print("upscale2", image, prompt)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # float16 weights frequently fail on CPU; fall back to float32 there.
    dtype = torch.float16 if device == "cuda" else torch.float32
    upscaler = LDMSuperResolutionPipeline.from_pretrained(
        "CompVis/ldm-super-resolution-4x-openimages", torch_dtype=dtype)
    upscaler = upscaler.to(device)
    # Note: this LDM pipeline is unconditional; the prompt argument is unused.
    return upscaler(image, num_inference_steps=10, eta=1).images[0]
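
# Not part of the original app: each handler above reloads its pipeline on
# every click, which dominates latency and VRAM churn. A minimal caching
# sketch (load once, reuse across requests):
from functools import lru_cache

@lru_cache(maxsize=None)
def get_latent_upscaler():
    # Single shared instance of the x2 latent upscaler used by upscale().
    return StableDiffusionLatentUpscalePipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")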
with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            gr.Button("FurnitureRemove").click(
                removeFurniture,
                inputs=[gr.Image(label="img", type="pil"),
                        gr.Image(label="mask", type="pil"),
                        gr.Textbox(label="positive_prompt", value="empty room"),
                        gr.Textbox(label="negative_prompt", value=""),
                        gr.Number(label="num_of_images", value=2),
                        gr.Number(label="resolution", value=512)],
                # Ten fixed slots; removeFurniture pads its result list to match.
                outputs=[gr.Image() for _ in range(10)])
        with gr.Column():
            gr.Button("Segmentation").click(segmentation,
                                            inputs=gr.Image(type="pil"),
                                            outputs=gr.JSON())
        with gr.Column():
            gr.Button("Upscale").click(upscale,
                                       inputs=[gr.Image(type="pil"),
                                               gr.Textbox(label="prompt", value="empty room")],
                                       outputs=gr.Image())
        with gr.Column():
            gr.Button("Upscale2").click(upscale2,
                                        inputs=[gr.Image(type="pil"),
                                                gr.Textbox(label="prompt", value="empty room")],
                                        outputs=gr.Image())

app.launch(debug=True, share=True)
# UP 1