File size: 3,484 Bytes
aa69275 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
from modules.utils import *
class Image2Hed:
    """Tool: detect the soft HED boundary (edge map) of an image file."""

    def __init__(self, device, pretrained_model_dir):
        # NOTE(review): `device` is accepted for interface parity with the other
        # tools but is not used — HEDdetector runs on its default device.
        print("Initializing Image2Hed")
        self.detector = HEDdetector.from_pretrained(f'{pretrained_model_dir}/ControlNet')

    @prompts(name="Hed Detection On Image",
             description="useful when you want to detect the soft hed boundary of the image. "
                         "like: detect the soft hed boundary of this image, or hed boundary detection on image, "
                         "or perform hed boundary detection on this image, or detect soft hed boundary image of this image. "
                         "The input to this tool should be a string, representing the image_path")
    def inference(self, inputs):
        """Run HED boundary detection on the image at `inputs`.

        Args:
            inputs: path to the source image.

        Returns:
            Path of the newly saved HED boundary image.
        """
        image = Image.open(inputs)
        hed = self.detector(image)
        # Derive a new output path from the input path so results don't clobber the source.
        updated_image_path = get_new_image_name(inputs, func_name="hed-boundary")
        hed.save(updated_image_path)
        print(f"\nProcessed Image2Hed, Input Image: {inputs}, Output Hed: {updated_image_path}")
        return updated_image_path
class HedText2Image:
    """Tool: generate a real image from a soft HED boundary image plus a text
    description, via Stable Diffusion v1.5 conditioned with a HED ControlNet."""

    def __init__(self, device, pretrained_model_dir):
        print("Initializing HedText2Image to %s" % device)
        # fp16 is only safe on CUDA; fall back to fp32 elsewhere (e.g. CPU).
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.controlnet = ControlNetModel.from_pretrained(f"{pretrained_model_dir}/sd-controlnet-hed",
                                                          torch_dtype=self.torch_dtype)
        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
            f"{pretrained_model_dir}/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
            torch_dtype=self.torch_dtype
        )
        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.to(device)
        self.seed = -1
        # a_prompt: quality boosters appended to every user prompt.
        # n_prompt: standard negative prompt suppressing common artifacts.
        self.a_prompt = 'best quality, extremely detailed'
        self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                        'fewer digits, cropped, worst quality, low quality'

    @prompts(name="Generate Image Condition On Soft Hed Boundary Image",
             description="useful when you want to generate a new real image from both the user description "
                         "and a soft hed boundary image. "
                         "like: generate a real image of a object or something from this soft hed boundary image, "
                         "or generate a new real image of a object or something from this hed boundary. "
                         "The input to this tool should be a comma separated string of two, "
                         "representing the image_path and the user description")
    def inference(self, inputs):
        """Generate an image guided by a HED boundary image and a text prompt.

        Args:
            inputs: "<image_path>,<description>" — split on the FIRST comma only,
                so descriptions containing commas are preserved intact.

        Returns:
            Path of the newly saved generated image.
        """
        # partition(',') is equivalent to the split-then-rejoin idiom but does one pass.
        image_path, _, instruct_text = inputs.partition(',')
        image = Image.open(image_path)
        # Fresh random seed per call so repeated invocations give varied results.
        self.seed = random.randint(0, 65535)
        seed_everything(self.seed)
        prompt = instruct_text + ', ' + self.a_prompt
        image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
                          guidance_scale=9.0).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="hed2image")
        image.save(updated_image_path)
        print(f"\nProcessed HedText2Image, Input Hed: {image_path}, Input Text: {instruct_text}, "
              f"Output Image: {updated_image_path}")
        return updated_image_path