# AnyDiffuse / config.py
# Default values
DEFAULT_STEPS = 23
DEFAULT_CFG = 0
MINIMUM_IMAGE_NUMBER = 1
MAXIMUM_IMAGE_NUMBER = 10
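# Usage sketch (an assumption for illustration, not part of the original file):
# callers would typically clamp a user-requested batch size into the supported
# range and fall back to DEFAULT_STEPS / DEFAULT_CFG when no override is given,
# roughly like the helper below.
def clamp_image_number(requested: int) -> int:
    """Clamp a requested image count into [MINIMUM_IMAGE_NUMBER, MAXIMUM_IMAGE_NUMBER]."""
    return max(MINIMUM_IMAGE_NUMBER, min(int(requested), MAXIMUM_IMAGE_NUMBER))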
DEFAULT_POSITIVE_PROMPT = """1girl wearing basketball jersey and short pants, full body, basketball court, sweaty, high detailed, sunny, day light, hdr,
score_9, score_8_up, score_7_up, very aesthetic,
layered, white hair, featuring soft waves and a slight outward curl at the ends,
parted in the middle, (short hair),
red glowing eyes, beautiful hazel red eyes, highly detailed eyes, thin eyebrows,
detailed black eyebrows,
long eyelashes,
detailed cornea,
blush,
parted lips, gorgeous lips, pink thin lips,
perfect anatomy,
"""
DEFAULT_NEGATIVE_PROMPT = """easynegative, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit,
fewer digits, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark,
username, blurry, lowres graffiti, low quality lowres simple background"""
task_stablepy: dict = {
    'txt2img': 'txt2img',
    'img2img': 'img2img',
    'inpaint': 'inpaint',
    # 'canny T2I Adapter': 'sdxl_canny_t2i',  # T2I adapters expose no step-callback parameter, so they do not work with diffusers 0.29.0
    # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
    # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
    # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
    # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
    'openpose ControlNet': 'openpose',
    'canny ControlNet': 'canny',
    'mlsd ControlNet': 'mlsd',
    'scribble ControlNet': 'scribble',
    'softedge ControlNet': 'softedge',
    'segmentation ControlNet': 'segmentation',
    'depth ControlNet': 'depth',
    'normalbae ControlNet': 'normalbae',
    'lineart ControlNet': 'lineart',
    # 'lineart_anime ControlNet': 'lineart_anime',
    'shuffle ControlNet': 'shuffle',
    'ip2p ControlNet': 'ip2p',
    'optical pattern ControlNet': 'pattern',
    'tile realistic': 'sdxl_tile_realistic',
}
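# Usage sketch (assumed caller code, not part of the original file): the keys
# above are the task labels shown in the UI and the values are the task names
# stablepy expects. A caller could resolve a dropdown selection like this,
# falling back to plain txt2img for unknown labels.
def resolve_task(ui_task_name: str) -> str:
    """Map a UI task label to its stablepy task name, defaulting to 'txt2img'."""
    return task_stablepy.get(ui_task_name, 'txt2img')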