import os
import gradio as gr
import jax
import numpy as np
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
#from diffusers.utils import load_image
from diffusers.utils.testing_utils import load_image
from PIL import Image
from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel
def image_grid(imgs, rows, cols):
    # Tile a list of equally sized PIL images into a single rows x cols grid image.
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def create_key(seed=0):
    return jax.random.PRNGKey(seed)
rng = create_key(0)
canny_image = load_image(
    "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg"
)
# Prompt pair used for the demo generation at start-up.
prompts = "a living room with tv, sea, window"
negative_prompts = "fan"
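# Load the trained MLSD ControlNet and the Flax Stable Diffusion pipeline it plugs into.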
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "tsungtao/controlnet-mlsd-202305011046", from_flax=True, dtype=jnp.float32
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
)
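# Attach the ControlNet parameters to the pipeline's parameter tree.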
params["controlnet"] = controlnet_params
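# One sample per JAX device: split the RNG and prepare per-sample prompt/image inputs.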
num_samples = jax.device_count()
rng = jax.random.split(rng, jax.device_count())
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)
processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
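# Replicate the parameters and shard the inputs across devices for parallel (pmapped) inference.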
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
negative_prompt_ids = shard(negative_prompt_ids)
processed_image = shard(processed_image)
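# Run the generation once at start-up; jit=True uses the pipeline's pmapped execution path.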
output = pipe(
    prompt_ids=prompt_ids,
    image=processed_image,
    params=p_params,
    prng_seed=rng,
    num_inference_steps=50,
    neg_prompt_ids=negative_prompt_ids,
    jit=True,
).images
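# Collapse the device axis, convert the arrays to PIL images, and tile them into a grid.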
output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
output_images = image_grid(output_images, max(num_samples // 4, 1), min(num_samples, 4))
os.makedirs("tao", exist_ok=True)  # make sure the output directory exists before saving
output_images.save("tao/image.png")
#gr.Interface.load("models/tsungtao/controlnet-mlsd-202305011046").launch()
def infer(prompt, negative_prompt, image):
    # Per-request inference is not implemented yet; return the grid generated at
    # start-up so the interface stays functional (see the sketch below).
    return output_images
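# The placeholder above ignores the user's inputs. A fuller `infer` might re-run the
# pipeline per request, reusing the replicated params and the pmapped path set up at
# start-up. The sketch below is only illustrative (it assumes gradio passes `image`
# as a numpy array and that one sample per device is still wanted) and is left
# commented out so the placeholder stays the active implementation:
#
# def infer(prompt, negative_prompt, image):
#     request_rng = jax.random.split(create_key(0), jax.device_count())  # fixed seed
#     cond_image = Image.fromarray(image).convert("RGB")
#     prompt_ids = shard(pipe.prepare_text_inputs([prompt] * num_samples))
#     neg_prompt_ids = shard(pipe.prepare_text_inputs([negative_prompt] * num_samples))
#     cond_inputs = shard(pipe.prepare_image_inputs([cond_image] * num_samples))
#     images = pipe(
#         prompt_ids=prompt_ids,
#         image=cond_inputs,
#         params=p_params,
#         prng_seed=request_rng,
#         num_inference_steps=50,
#         neg_prompt_ids=neg_prompt_ids,
#         jit=True,
#     ).images
#     images = pipe.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
#     return image_grid(images, max(num_samples // 4, 1), min(num_samples, 4))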
gr.Interface(fn=infer, inputs=["text", "text", "image"], outputs="image").launch()