Spaces: Running on Zero
File size: 3,855 Bytes
import gradio as gr
import torch
import os
import glob
from datetime import datetime
from PIL import Image
from huggingface_hub import hf_hub_download
from diffusers.utils import load_image
from diffusers import EulerDiscreteScheduler
from pipline_StableDiffusion_ConsistentID import ConsistentIDStableDiffusionPipeline
import spaces
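# Under ZeroGPU, a CUDA device is only attached while an @spaces.GPU-decorated
# function is running, so this module-level tensor still reports 'cpu' here.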
zero = torch.Tensor([0]).cuda()
print(zero.device) # <-- 'cpu' 🤔
# Gets the absolute path of the current script
script_directory = os.path.dirname(os.path.realpath(__file__))
# download ConsistentID checkpoint to cache
consistentID_path = hf_hub_download(repo_id="JackAILab/ConsistentID", filename="ConsistentID-v1.bin", repo_type="model")
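# hf_hub_download caches the checkpoint locally and returns its path; the
# directory and filename are split out again when loading it into the pipeline.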
@spaces.GPU
def process(inputImage, prompt, negative_prompt):
    device = zero.device  # "cuda" inside the @spaces.GPU context
    base_model_path = "SG161222/Realistic_Vision_V6.0_B1_noVAE"

    ### Load base model
    pipe = ConsistentIDStableDiffusionPipeline.from_pretrained(
        base_model_path,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    ).to(device)

    ### Load consistentID_model checkpoint
    pipe.load_ConsistentID_model(
        os.path.dirname(consistentID_path),
        subfolder="",
        weight_name=os.path.basename(consistentID_path),
        trigger_word="img",
    )
    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
    # hyper-parameters
    select_images = load_image(Image.fromarray(inputImage))
    num_steps = 50
    merge_steps = 30

    if prompt == "":
        prompt = "A man, in a forest, adventuring"
    if negative_prompt == "":
        negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality, blurry"

    # Extend prompt
    prompt = "cinematic photo," + prompt + ", 50mm photograph, half-length portrait, film, bokeh, professional, 4k, highly detailed"
    negative_prompt_group = "((((ugly)))), (((duplicate))), ((morbid)), ((mutilated)), [out of frame], extra fingers, mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), ((ugly)), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, (((disfigured))). out of frame, ugly, extra limbs, (bad anatomy), gross proportions, (malformed limbs), ((missing arms)), ((missing legs)), (((extra arms))), (((extra legs))), mutated hands, (fused fingers), (too many fingers), (((long neck)))"
    negative_prompt = negative_prompt + negative_prompt_group

    seed = torch.randint(0, 1000, (1,)).item()
    generator = torch.Generator(device=device).manual_seed(seed)
    images = pipe(
        prompt=prompt,
        width=512,
        height=512,
        input_id_images=select_images,
        negative_prompt=negative_prompt,
        num_images_per_prompt=1,
        num_inference_steps=num_steps,
        start_merge_step=merge_steps,
        generator=generator,
    ).images[0]
    # Save the result under <script dir>/images/gradio_outputs and return its path
    current_date = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")  # filename-safe timestamp
    output_dir = os.path.join(script_directory, "images", "gradio_outputs")
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, f"{current_date}-{seed}.jpg")
    images.save(output_path)
    return output_path
iface = gr.Interface(
    fn=process,
    inputs=[
        gr.Image(label="Upload Image"),
        gr.Textbox(label="prompt", placeholder="A man, in a forest, adventuring"),
        gr.Textbox(label="negative prompt", placeholder="monochrome, lowres, bad anatomy, worst quality, low quality, blurry"),
    ],
    outputs=[
        gr.Image(label="Output"),
    ],
    title="ConsistentID Demo",
    description="Upload a reference portrait below.",
)

iface.launch()
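# Kept for reference: minimal ZeroGPU example; inside an @spaces.GPU-decorated
# function, zero.device resolves to 'cuda:0'.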
# @spaces.GPU
# def greet(n):
#     print(zero.device)  # <-- 'cuda:0' 🤗
#     return f"Hello {zero + n} Tensor"
#
# demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
# demo.launch()
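# Local smoke test (hypothetical; assumes a CUDA device is available and
# "face.jpg" is a placeholder portrait image on disk):
#   import numpy as np
#   out_path = process(np.array(Image.open("face.jpg")), "", "")
#   print(out_path)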