LeviathAnjelo committed on
Commit
317b8e3
1 Parent(s): 40022a0

Add 5 files

Files changed (5)
  1. app.py +237 -0
  2. gitattributes +35 -0
  3. model.py +114 -0
  4. pipeline.py +471 -0
  5. style_template.py +59 -0
app.py ADDED
@@ -0,0 +1,237 @@
+ import torch
+ import numpy as np
+ import random
+ import os
+
+ from diffusers.utils import load_image
+ from diffusers import DDIMScheduler
+
+ from huggingface_hub import hf_hub_download
+ import spaces
+ import gradio as gr
+
+ from pipeline import PhotoMakerStableDiffusionXLPipeline
+ from style_template import styles
+
+ # global variables
+ base_model_path = 'SG161222/RealVisXL_V3.0'
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ MAX_SEED = np.iinfo(np.int32).max
+ STYLE_NAMES = list(styles.keys())
+ DEFAULT_STYLE_NAME = "Photographic (Default)"
+
+ # download the PhotoMaker checkpoint to the cache
+ photomaker_ckpt = hf_hub_download(repo_id="TencentARC/PhotoMaker", filename="photomaker-v1.bin", repo_type="model")
+
+ pipe = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
+     base_model_path,
+     torch_dtype=torch.bfloat16,
+     use_safetensors=True,
+     variant="fp16",
+ ).to(device)
+
+ pipe.load_photomaker_adapter(
+     os.path.dirname(photomaker_ckpt),
+     subfolder="",
+     weight_name=os.path.basename(photomaker_ckpt),
+     trigger_word="img"
+ )
+ pipe.id_encoder.to(device)
+
+ pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+ # pipe.set_adapters(["photomaker"], adapter_weights=[1.0])
+ pipe.fuse_lora()
+
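+ # Note (editorial): load_photomaker_adapter registers the ID encoder and loads the
+ # PhotoMaker LoRA residuals; fuse_lora() then merges those LoRA weights into the
+ # base model weights, so no adapter switching is needed during inference.
+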
+ @spaces.GPU
+ def generate_image(upload_images, prompt, negative_prompt, style_name, num_steps, style_strength_ratio, num_outputs, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
+     # check that the trigger word appears exactly once in the prompt
+     image_token_id = pipe.tokenizer.convert_tokens_to_ids(pipe.trigger_word)
+     input_ids = pipe.tokenizer.encode(prompt)
+     if image_token_id not in input_ids:
+         raise gr.Error(f"Cannot find the trigger word '{pipe.trigger_word}' in the text prompt! Please refer to step 2️⃣")
+
+     if input_ids.count(image_token_id) > 1:
+         raise gr.Error(f"Cannot use multiple trigger words '{pipe.trigger_word}' in the text prompt!")
+
+     # apply the style template
+     prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
+
+     # prepend the NSFW terms to the negative prompt
+     negative_prompt = f"nsfw, naked, {negative_prompt}"
+     if upload_images is None:
+         raise gr.Error("Cannot find any input face image! Please refer to step 1️⃣")
+
+     input_id_images = []
+     for img in upload_images:
+         input_id_images.append(load_image(img))
+
+     generator = torch.Generator(device=device).manual_seed(seed)
+
+     print("Start inference...")
+     print(f"[Debug] Prompt: {prompt}, \n[Debug] Neg Prompt: {negative_prompt}")
+     start_merge_step = int(float(style_strength_ratio) / 100 * num_steps)
+     if start_merge_step > 30:
+         start_merge_step = 30
+     print(start_merge_step)
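+     # Note (editorial): start_merge_step delays the ID conditioning. The first
+     # style_strength_ratio% of the denoising steps (capped at 30 steps) run on
+     # text-only embeddings; after that the pipeline switches to the ID-enhanced
+     # embeddings (see pipeline.py), so a higher ratio preserves more of the style.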
+     images = pipe(
+         prompt=prompt,
+         input_id_images=input_id_images,
+         negative_prompt=negative_prompt,
+         num_images_per_prompt=num_outputs,
+         num_inference_steps=num_steps,
+         start_merge_step=start_merge_step,
+         generator=generator,
+         guidance_scale=guidance_scale,
+     ).images
+     return images, gr.update(visible=True)
+
+ def swap_to_gallery(images):
+     return gr.update(value=images, visible=True), gr.update(visible=True), gr.update(visible=False)
+
+ def upload_example_to_gallery(images, prompt, style, negative_prompt):
+     return gr.update(value=images, visible=True), gr.update(visible=True), gr.update(visible=False)
+
+ def remove_back_to_files():
+     return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
+
+ def remove_tips():
+     return gr.update(visible=False)
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+ def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
+     p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+     return p.replace("{prompt}", positive), n + ' ' + negative
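+ # Note (editorial): apply_style falls back to DEFAULT_STYLE_NAME for unknown style
+ # names, and appends the user's negative prompt to the style's own negative prompt.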
+
+ def get_image_path_list(folder_name):
+     image_basename_list = os.listdir(folder_name)
+     image_path_list = sorted([os.path.join(folder_name, basename) for basename in image_basename_list])
+     return image_path_list
+
+ def get_example():
+     case = [
+         [
+             get_image_path_list('./examples/scarletthead_woman'),
+             "instagram photo, portrait photo of a woman img, colorful, perfect face, natural skin, hard shadows, film grain",
+             "(No style)",
+             "(asymmetry, worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth",
+         ],
+         [
+             get_image_path_list('./examples/newton_man'),
+             "sci-fi, closeup portrait photo of a man img wearing the sunglasses in Iron man suit, face, slim body, high quality, film grain",
+             "(No style)",
+             "(asymmetry, worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth",
+         ],
+     ]
+     return case
+
+ # NOTE (editorial): logo, title, description, and article are referenced below but
+ # are not defined anywhere in this commit; minimal placeholders are added here so
+ # the app runs. The original header strings are not part of this snapshot.
+ logo = r""
+ title = r"PhotoMaker"
+ description = r""
+ article = r""
+
+ tips = r""" """
+ # 4. When generating realistic photos, if it's not real enough, try switching to our other gradio application [PhotoMaker-Realistic]().
+
+ css = '''
+ .gradio-container {width: 85% !important}
+ '''
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(logo)
+     gr.Markdown(title)
+     gr.Markdown(description)
+     # gr.DuplicateButton(
+     #     value="Duplicate Space for private use ",
+     #     elem_id="duplicate-button",
+     #     visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+     # )
+     with gr.Row():
+         with gr.Column():
+             files = gr.File(
+                 label="Drag (Select) 1 or more photos of your face",
+                 file_types=["image"],
+                 file_count="multiple"
+             )
+             uploaded_files = gr.Gallery(label="Your images", visible=False, columns=5, rows=1, height=200)
+             with gr.Column(visible=False) as clear_button:
+                 remove_and_reupload = gr.ClearButton(value="Remove and upload new ones", components=files, size="sm")
+             prompt = gr.Textbox(label="Prompt",
+                                 info="Try something like 'a photo of a man/woman img', 'img' is the trigger word.",
+                                 placeholder="A photo of a [man/woman img]...")
+             style = gr.Dropdown(label="Style template", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
+             submit = gr.Button("Submit")
+
+             with gr.Accordion(open=False, label="Advanced Options"):
+                 negative_prompt = gr.Textbox(
+                     label="Negative Prompt",
+                     placeholder="low quality",
+                     value="nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry",
+                 )
+                 num_steps = gr.Slider(
+                     label="Number of sample steps",
+                     minimum=20,
+                     maximum=100,
+                     step=1,
+                     value=50,
+                 )
+                 style_strength_ratio = gr.Slider(
+                     label="Style strength (%)",
+                     minimum=15,
+                     maximum=50,
+                     step=1,
+                     value=20,
+                 )
+                 num_outputs = gr.Slider(
+                     label="Number of output images",
+                     minimum=1,
+                     maximum=4,
+                     step=1,
+                     value=2,
+                 )
+                 guidance_scale = gr.Slider(
+                     label="Guidance scale",
+                     minimum=0.1,
+                     maximum=10.0,
+                     step=0.1,
+                     value=5,
+                 )
+                 seed = gr.Slider(
+                     label="Seed",
+                     minimum=0,
+                     maximum=MAX_SEED,
+                     step=1,
+                     value=0,
+                 )
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Column():
+             gallery = gr.Gallery(label="Generated Images")
+             usage_tips = gr.Markdown(label="Usage tips of PhotoMaker", value=tips, visible=False)
+
+     files.upload(fn=swap_to_gallery, inputs=files, outputs=[uploaded_files, clear_button, files])
+     remove_and_reupload.click(fn=remove_back_to_files, outputs=[uploaded_files, clear_button, files])
+
+     submit.click(
+         fn=remove_tips,
+         outputs=usage_tips,
+     ).then(
+         fn=randomize_seed_fn,
+         inputs=[seed, randomize_seed],
+         outputs=seed,
+         queue=False,
+         api_name=False,
+     ).then(
+         fn=generate_image,
+         inputs=[files, prompt, negative_prompt, style, num_steps, style_strength_ratio, num_outputs, guidance_scale, seed],
+         outputs=[gallery, usage_tips]
+     )
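+     # Note (editorial): the three chained events above run in order on Submit:
+     # hide the tips panel, resolve the seed (randomized or fixed), then generate.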
+
+     gr.Examples(
+         examples=get_example(),
+         inputs=[files, prompt, style, negative_prompt],
+         run_on_click=True,
+         fn=upload_example_to_gallery,
+         outputs=[uploaded_files, clear_button, files],
+     )
+
+     gr.Markdown(article)
+
+ demo.launch()
gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
model.py ADDED
@@ -0,0 +1,114 @@
+ # Merge the image encoder and the fuse module to create an ID Encoder.
+ # Given multiple ID images, it directly produces updated text embeddings containing a stacked ID embedding.
+
+ import torch
+ import torch.nn as nn
+ from transformers.models.clip.modeling_clip import CLIPVisionModelWithProjection
+ from transformers.models.clip.configuration_clip import CLIPVisionConfig
+ from transformers import PretrainedConfig
+
+ VISION_CONFIG_DICT = {
+     "hidden_size": 1024,
+     "intermediate_size": 4096,
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "patch_size": 14,
+     "projection_dim": 768
+ }
+
+ class MLP(nn.Module):
+     def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True):
+         super().__init__()
+         if use_residual:
+             assert in_dim == out_dim
+         self.layernorm = nn.LayerNorm(in_dim)
+         self.fc1 = nn.Linear(in_dim, hidden_dim)
+         self.fc2 = nn.Linear(hidden_dim, out_dim)
+         self.use_residual = use_residual
+         self.act_fn = nn.GELU()
+
+     def forward(self, x):
+         residual = x
+         x = self.layernorm(x)
+         x = self.fc1(x)
+         x = self.act_fn(x)
+         x = self.fc2(x)
+         if self.use_residual:
+             x = x + residual
+         return x
+
+
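+ # Note (editorial): FuseModule concatenates each class-word token embedding with its
+ # corresponding ID embedding (2048 + 2048), projects back to 2048 through a residual
+ # MLP stack, and scatters the fused embeddings back into the prompt sequence.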
+ class FuseModule(nn.Module):
+     def __init__(self, embed_dim):
+         super().__init__()
+         self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False)
+         self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True)
+         self.layer_norm = nn.LayerNorm(embed_dim)
+
+     def fuse_fn(self, prompt_embeds, id_embeds):
+         print(prompt_embeds.shape, id_embeds.shape)  # debug
+         stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
+         stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
+         stacked_id_embeds = self.mlp2(stacked_id_embeds)
+         stacked_id_embeds = self.layer_norm(stacked_id_embeds)
+         return stacked_id_embeds
+
+     def forward(
+         self,
+         prompt_embeds,
+         id_embeds,
+         class_tokens_mask,
+     ) -> torch.Tensor:
+         # id_embeds shape: [b, max_num_inputs, 1, 2048]
+         id_embeds = id_embeds.to(prompt_embeds.dtype)
+         num_inputs = class_tokens_mask.sum().unsqueeze(0)  # TODO: check for training case
+         batch_size, max_num_inputs = id_embeds.shape[:2]
+         # seq_length: 77
+         seq_length = prompt_embeds.shape[1]
+         # flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
+         flat_id_embeds = id_embeds.view(
+             -1, id_embeds.shape[-2], id_embeds.shape[-1]
+         )
+         # valid_id_mask [b*max_num_inputs]
+         valid_id_mask = (
+             torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
+             < num_inputs[:, None]
+         )
+         valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]
+
+         prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
+         class_tokens_mask = class_tokens_mask.view(-1)
+         valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
+         # slice out the image token embeddings
+         image_token_embeds = prompt_embeds[class_tokens_mask]
+         stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
+         assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
+         prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
+         updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1)
+         return updated_prompt_embeds
+
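+ # Note (editorial): the ID encoder projects the pooled CLIP vision features twice,
+ # to 768 and 1280 dims, and concatenates them to 2048 so the result matches the
+ # width of the SDXL prompt embeddings produced by its two text encoders.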
+ class PhotoMakerIDEncoder(CLIPVisionModelWithProjection):
+     def __init__(self):
+         super().__init__(CLIPVisionConfig(**VISION_CONFIG_DICT))
+         self.visual_projection_2 = nn.Linear(1024, 1280, bias=False)
+         self.fuse_module = FuseModule(2048)
+
+     def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask):
+         b, num_inputs, c, h, w = id_pixel_values.shape
+         id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)
+
+         shared_id_embeds = self.vision_model(id_pixel_values)[1]
+         id_embeds = self.visual_projection(shared_id_embeds)
+         id_embeds_2 = self.visual_projection_2(shared_id_embeds)
+
+         id_embeds = id_embeds.view(b, num_inputs, 1, -1)
+         id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1)
+
+         id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1)
+         updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)
+
+         return updated_prompt_embeds
+
+
+ if __name__ == "__main__":
+     PhotoMakerIDEncoder()
pipeline.py ADDED
@@ -0,0 +1,471 @@
+ from typing import Any, Callable, Dict, List, Optional, Union, Tuple
+ from collections import OrderedDict
+ import os
+ import PIL
+ import numpy as np
+
+ import torch
+ from torchvision import transforms as T
+
+ from safetensors import safe_open
+ from huggingface_hub.utils import validate_hf_hub_args
+ from transformers import CLIPImageProcessor, CLIPTokenizer
+ from diffusers import StableDiffusionXLPipeline
+ from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
+ # rescale_noise_cfg is used in the guidance-rescale branch below but was missing from the imports
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl import rescale_noise_cfg
+ from diffusers.utils import (
+     _get_model_file,
+     is_transformers_available,
+     logging,
+ )
+
+ from model import PhotoMakerIDEncoder
+
+ PipelineImageInput = Union[
+     PIL.Image.Image,
+     torch.FloatTensor,
+     List[PIL.Image.Image],
+     List[torch.FloatTensor],
+ ]
+
+
+ class PhotoMakerStableDiffusionXLPipeline(StableDiffusionXLPipeline):
+     @validate_hf_hub_args
+     def load_photomaker_adapter(
+         self,
+         pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
+         weight_name: str,
+         subfolder: str = '',
+         trigger_word: str = 'img',
+         **kwargs,
+     ):
+         """
+         Parameters:
+             pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
+                 Can be either:
+
+                     - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
+                       the Hub.
+                     - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
+                       with [`ModelMixin.save_pretrained`].
+                     - A [torch state
+                       dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
+
+             weight_name (`str`):
+                 The name of the weights file (for example `photomaker-v1.bin`) within the repository or directory.
+
+             subfolder (`str`, defaults to `""`):
+                 The subfolder location of a model file within a larger model repository on the Hub or locally.
+
+             trigger_word (`str`, *optional*, defaults to `"img"`):
+                 The trigger word that marks the position of the class word in the prompt.
+         """
+
+         # Load the main state dict first.
+         cache_dir = kwargs.pop("cache_dir", None)
+         force_download = kwargs.pop("force_download", False)
+         resume_download = kwargs.pop("resume_download", False)
+         proxies = kwargs.pop("proxies", None)
+         local_files_only = kwargs.pop("local_files_only", None)
+         token = kwargs.pop("token", None)
+         revision = kwargs.pop("revision", None)
+
+         user_agent = {
+             "file_type": "attn_procs_weights",
+             "framework": "pytorch",
+         }
+
+         if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+             model_file = _get_model_file(
+                 pretrained_model_name_or_path_or_dict,
+                 weights_name=weight_name,
+                 cache_dir=cache_dir,
+                 force_download=force_download,
+                 resume_download=resume_download,
+                 proxies=proxies,
+                 local_files_only=local_files_only,
+                 token=token,
+                 revision=revision,
+                 subfolder=subfolder,
+                 user_agent=user_agent,
+             )
+             if weight_name.endswith(".safetensors"):
+                 state_dict = {"id_encoder": {}, "lora_weights": {}}
+                 with safe_open(model_file, framework="pt", device="cpu") as f:
+                     for key in f.keys():
+                         if key.startswith("id_encoder."):
+                             state_dict["id_encoder"][key.replace("id_encoder.", "")] = f.get_tensor(key)
+                         elif key.startswith("lora_weights."):
+                             state_dict["lora_weights"][key.replace("lora_weights.", "")] = f.get_tensor(key)
+             else:
+                 state_dict = torch.load(model_file, map_location="cpu")
+         else:
+             state_dict = pretrained_model_name_or_path_or_dict
+
+         keys = list(state_dict.keys())
+         if keys != ["id_encoder", "lora_weights"]:
+             raise ValueError("The state dict must contain exactly the keys `id_encoder` and `lora_weights`.")
+
+         self.trigger_word = trigger_word
+         # load the fine-tuned CLIP image encoder and fuse module if they have not been registered to the pipeline yet
+         print(f"Loading PhotoMaker components [1] id_encoder from [{pretrained_model_name_or_path_or_dict}]...")
+         id_encoder = PhotoMakerIDEncoder()
+         id_encoder.load_state_dict(state_dict["id_encoder"], strict=True)
+         id_encoder = id_encoder.to(self.device, dtype=self.unet.dtype)
+         self.id_encoder = id_encoder
+         self.id_image_processor = CLIPImageProcessor()
+
+         # load the LoRA weights into the models
+         print(f"Loading PhotoMaker components [2] lora_weights from [{pretrained_model_name_or_path_or_dict}]")
+         self.load_lora_weights(state_dict["lora_weights"], adapter_name="photomaker")
+
+         # Add the trigger word as a token
+         if self.tokenizer is not None:
+             self.tokenizer.add_tokens([self.trigger_word], special_tokens=True)
+
+         self.tokenizer_2.add_tokens([self.trigger_word], special_tokens=True)
+
+
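+     # Note (editorial): encode_prompt_with_trigger_word strips the trigger token from
+     # the prompt, repeats the preceding class word once per ID image, and returns a
+     # boolean mask marking those repeated positions for the fuse module.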
+     def encode_prompt_with_trigger_word(
+         self,
+         prompt: str,
+         prompt_2: Optional[str] = None,
+         num_id_images: int = 1,
+         device: Optional[torch.device] = None,
+         prompt_embeds: Optional[torch.FloatTensor] = None,
+         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+         class_tokens_mask: Optional[torch.LongTensor] = None,
+     ):
+         device = device or self._execution_device
+
+         if prompt is not None and isinstance(prompt, str):
+             batch_size = 1
+         elif prompt is not None and isinstance(prompt, list):
+             batch_size = len(prompt)
+         else:
+             batch_size = prompt_embeds.shape[0]
+
+         # Find the token id of the trigger word
+         image_token_id = self.tokenizer_2.convert_tokens_to_ids(self.trigger_word)
+
+         # Define tokenizers and text encoders
+         tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
+         text_encoders = (
+             [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
+         )
+
+         if prompt_embeds is None:
+             prompt_2 = prompt_2 or prompt
+             prompt_embeds_list = []
+             prompts = [prompt, prompt_2]
+             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
+                 input_ids = tokenizer.encode(prompt)  # TODO: batch encode
+                 clean_index = 0
+                 clean_input_ids = []
+                 class_token_index = []
+                 # Find the corresponding class word token based on the newly added trigger word token
+                 for i, token_id in enumerate(input_ids):
+                     if token_id == image_token_id:
+                         class_token_index.append(clean_index - 1)
+                     else:
+                         clean_input_ids.append(token_id)
+                         clean_index += 1
+
+                 if len(class_token_index) != 1:
+                     raise ValueError(
+                         f"PhotoMaker currently does not support multiple trigger words in a single prompt. "
+                         f"Trigger word: {self.trigger_word}, Prompt: {prompt}."
+                     )
+                 class_token_index = class_token_index[0]
+
+                 # Expand the class word token and the corresponding mask
+                 class_token = clean_input_ids[class_token_index]
+                 clean_input_ids = clean_input_ids[:class_token_index] + [class_token] * num_id_images + \
+                     clean_input_ids[class_token_index+1:]
+
+                 # Truncation or padding
+                 max_len = tokenizer.model_max_length
+                 if len(clean_input_ids) > max_len:
+                     clean_input_ids = clean_input_ids[:max_len]
+                 else:
+                     clean_input_ids = clean_input_ids + [tokenizer.pad_token_id] * (
+                         max_len - len(clean_input_ids)
+                     )
+
+                 class_tokens_mask = [True if class_token_index <= i < class_token_index+num_id_images else False
+                                      for i in range(len(clean_input_ids))]
+
+                 clean_input_ids = torch.tensor(clean_input_ids, dtype=torch.long).unsqueeze(0)
+                 class_tokens_mask = torch.tensor(class_tokens_mask, dtype=torch.bool).unsqueeze(0)
+
+                 prompt_embeds = text_encoder(
+                     clean_input_ids.to(device),
+                     output_hidden_states=True,
+                 )
+
+                 # We are always interested in the pooled output of the final text encoder only
+                 pooled_prompt_embeds = prompt_embeds[0]
+                 prompt_embeds = prompt_embeds.hidden_states[-2]
+                 prompt_embeds_list.append(prompt_embeds)
+
+             prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
+
+         prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
+         class_tokens_mask = class_tokens_mask.to(device=device)  # TODO: ignoring the two-prompt case
+
+         return prompt_embeds, pooled_prompt_embeds, class_tokens_mask
+
+
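+     # Note (editorial): __call__ follows the standard SDXL sampling loop but keeps two
+     # conditioning sets: text-only embeddings (trigger word removed) for the first
+     # start_merge_step steps, then the ID-enhanced embeddings for the remaining steps.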
+     @torch.no_grad()
+     def __call__(
+         self,
+         prompt: Union[str, List[str]] = None,
+         prompt_2: Optional[Union[str, List[str]]] = None,
+         height: Optional[int] = None,
+         width: Optional[int] = None,
+         num_inference_steps: int = 50,
+         denoising_end: Optional[float] = None,
+         guidance_scale: float = 5.0,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         negative_prompt_2: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         prompt_embeds: Optional[torch.FloatTensor] = None,
+         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+         negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+         guidance_rescale: float = 0.0,
+         original_size: Optional[Tuple[int, int]] = None,
+         crops_coords_top_left: Tuple[int, int] = (0, 0),
+         target_size: Optional[Tuple[int, int]] = None,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: int = 1,
+         # Added parameters (for PhotoMaker)
+         input_id_images: PipelineImageInput = None,
+         class_tokens_mask: Optional[torch.LongTensor] = None,
+         prompt_embeds_text_only: Optional[torch.FloatTensor] = None,
+         pooled_prompt_embeds_text_only: Optional[torch.FloatTensor] = None,
+         start_merge_step: int = 0,
+     ):
+         # TODO: doc
+         # 0. Default height and width to unet
+         height = height or self.unet.config.sample_size * self.vae_scale_factor
+         width = width or self.unet.config.sample_size * self.vae_scale_factor
+
+         original_size = original_size or (height, width)
+         target_size = target_size or (height, width)
+
+         # 1. Check inputs. Raise error if not correct
+         self.check_inputs(
+             prompt,
+             prompt_2,
+             height,
+             width,
+             callback_steps,
+             negative_prompt,
+             negative_prompt_2,
+             prompt_embeds,
+             negative_prompt_embeds,
+             pooled_prompt_embeds,
+             negative_pooled_prompt_embeds,
+         )
+
+         if prompt_embeds is not None and class_tokens_mask is None:
+             raise ValueError(
+                 "If `prompt_embeds` are provided, `class_tokens_mask` also has to be passed. Make sure to generate `class_tokens_mask` from the same tokenizer that was used to generate `prompt_embeds`."
+             )
+         # check the input id images
+         if input_id_images is None:
+             raise ValueError(
+                 "Provide `input_id_images`. Cannot leave `input_id_images` undefined for the PhotoMaker pipeline."
+             )
+         if not isinstance(input_id_images, list):
+             input_id_images = [input_id_images]
+
+         # 2. Define call parameters
+         if prompt is not None and isinstance(prompt, str):
+             batch_size = 1
+         elif prompt is not None and isinstance(prompt, list):
+             batch_size = len(prompt)
+         else:
+             batch_size = prompt_embeds.shape[0]
+
+         device = self._execution_device
+
+         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+         # corresponds to doing no classifier-free guidance.
+         do_classifier_free_guidance = guidance_scale > 1.0
+
+         assert do_classifier_free_guidance
+
+         # 3. Encode input prompt
+         num_id_images = len(input_id_images)
+
+         (
+             prompt_embeds,
+             pooled_prompt_embeds,
+             class_tokens_mask,
+         ) = self.encode_prompt_with_trigger_word(
+             prompt=prompt,
+             prompt_2=prompt_2,
+             device=device,
+             num_id_images=num_id_images,
+             prompt_embeds=prompt_embeds,
+             pooled_prompt_embeds=pooled_prompt_embeds,
+             class_tokens_mask=class_tokens_mask,
+         )
+
+         # 4. Encode input prompt without the trigger word for delayed conditioning
+         prompt_text_only = prompt.replace(" " + self.trigger_word, "")  # sensitive to whitespace
+         (
+             prompt_embeds_text_only,
+             negative_prompt_embeds,
+             pooled_prompt_embeds_text_only,  # TODO: replace the pooled_prompt_embeds with the text-only prompt
+             negative_pooled_prompt_embeds,
+         ) = self.encode_prompt(
+             prompt=prompt_text_only,
+             prompt_2=prompt_2,
+             device=device,
+             num_images_per_prompt=num_images_per_prompt,
+             do_classifier_free_guidance=do_classifier_free_guidance,
+             negative_prompt=negative_prompt,
+             negative_prompt_2=negative_prompt_2,
+             prompt_embeds=prompt_embeds_text_only,
+             negative_prompt_embeds=negative_prompt_embeds,
+             pooled_prompt_embeds=pooled_prompt_embeds_text_only,
+             negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+         )
+
+         # 5. Prepare the input ID images
+         dtype = next(self.id_encoder.parameters()).dtype
+         if not isinstance(input_id_images[0], torch.Tensor):
+             id_pixel_values = self.id_image_processor(input_id_images, return_tensors="pt").pixel_values
+
+         id_pixel_values = id_pixel_values.unsqueeze(0).to(device=device, dtype=dtype)  # TODO: multiple prompts
+
+         # 6. Get the updated text embedding with the stacked ID embedding
+         prompt_embeds = self.id_encoder(id_pixel_values, prompt_embeds, class_tokens_mask)
+
+         bs_embed, seq_len, _ = prompt_embeds.shape
+         # duplicate text embeddings for each generation per prompt, using an mps-friendly method
+         prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+         prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+         pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
+             bs_embed * num_images_per_prompt, -1
+         )
+
+         # 7. Prepare timesteps
+         self.scheduler.set_timesteps(num_inference_steps, device=device)
+         timesteps = self.scheduler.timesteps
+
+         # 8. Prepare latent variables
+         num_channels_latents = self.unet.config.in_channels
+         latents = self.prepare_latents(
+             batch_size * num_images_per_prompt,
+             num_channels_latents,
+             height,
+             width,
+             prompt_embeds.dtype,
+             device,
+             generator,
+             latents,
+         )
+
+         # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+         # 10. Prepare added time ids & embeddings
+         if self.text_encoder_2 is None:
+             text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
+         else:
+             text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
+
+         add_time_ids = self._get_add_time_ids(
+             original_size,
+             crops_coords_top_left,
+             target_size,
+             dtype=prompt_embeds.dtype,
+             text_encoder_projection_dim=text_encoder_projection_dim,
+         )
+         add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
+         add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
+
+         # 11. Denoising loop
+         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+         with self.progress_bar(total=num_inference_steps) as progress_bar:
+             for i, t in enumerate(timesteps):
+                 latent_model_input = (
+                     torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+                 )
+                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
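+                 # Note (editorial): delayed ID conditioning: use the text-only
+                 # embeddings for the first start_merge_step steps, then switch to the
+                 # embeddings fused with the stacked ID embedding.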
+                 if i <= start_merge_step:
+                     current_prompt_embeds = torch.cat(
+                         [negative_prompt_embeds, prompt_embeds_text_only], dim=0
+                     )
+                     add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds_text_only], dim=0)
+                 else:
+                     current_prompt_embeds = torch.cat(
+                         [negative_prompt_embeds, prompt_embeds], dim=0
+                     )
+                     add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
+                 # predict the noise residual
+                 added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+                 noise_pred = self.unet(
+                     latent_model_input,
+                     t,
+                     encoder_hidden_states=current_prompt_embeds,
+                     cross_attention_kwargs=cross_attention_kwargs,
+                     added_cond_kwargs=added_cond_kwargs,
+                     return_dict=False,
+                 )[0]
+
+                 # perform guidance
+                 if do_classifier_free_guidance:
+                     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                 if do_classifier_free_guidance and guidance_rescale > 0.0:
+                     # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+                     noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+                 # compute the previous noisy sample x_t -> x_t-1
+                 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+                 # call the callback, if provided
+                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                     progress_bar.update()
+                     if callback is not None and i % callback_steps == 0:
+                         callback(i, t, latents)
+
+         # make sure the VAE is in float32 mode, as it overflows in float16
+         if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
+             self.upcast_vae()
+             latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
+
+         if not output_type == "latent":
+             image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
+         else:
+             image = latents
+             return StableDiffusionXLPipelineOutput(images=image)
+
+         # apply watermark if available
+         # if self.watermark is not None:
+         #     image = self.watermark.apply_watermark(image)
+
+         image = self.image_processor.postprocess(image, output_type=output_type)
+
+         # Offload last model to CPU
+         if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+             self.final_offload_hook.offload()
+
+         if not return_dict:
+             return (image,)
+
+         return StableDiffusionXLPipelineOutput(images=image)
style_template.py ADDED
@@ -0,0 +1,59 @@
+ style_list = [
+     {
+         "name": "(No style)",
+         "prompt": "{prompt}",
+         "negative_prompt": "",
+     },
+     {
+         "name": "Cinematic",
+         "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
+         "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
+     },
+     {
+         "name": "Disney Character",
+         "prompt": "A Pixar animation character of {prompt} . pixar-style, studio anime, Disney, high-quality",
+         "negative_prompt": "lowres, bad anatomy, bad hands, text, bad eyes, bad arms, bad legs, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, blurry, grayscale, noisy, sloppy, messy, grainy, highly detailed, ultra textured, photo",
+     },
+     {
+         "name": "Digital Art",
+         "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
+         "negative_prompt": "photo, photorealistic, realism, ugly",
+     },
+     {
+         "name": "Photographic (Default)",
+         "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
+         "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
+     },
+     {
+         "name": "Fantasy art",
+         "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
+         "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
+     },
+     {
+         "name": "Neonpunk",
+         "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
+         "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
+     },
+     {
+         "name": "Enhance",
+         "prompt": "breathtaking {prompt} . award-winning, professional, highly detailed",
+         "negative_prompt": "ugly, deformed, noisy, blurry, distorted, grainy",
+     },
+     {
+         "name": "Comic book",
+         "prompt": "comic {prompt} . graphic illustration, comic art, graphic novel art, vibrant, highly detailed",
+         "negative_prompt": "photograph, deformed, glitch, noisy, realistic, stock photo",
+     },
+     {
+         "name": "Lowpoly",
+         "prompt": "low-poly style {prompt} . low-poly game art, polygon mesh, jagged, blocky, wireframe edges, centered composition",
+         "negative_prompt": "noisy, sloppy, messy, grainy, highly detailed, ultra textured, photo",
+     },
+     {
+         "name": "Line art",
+         "prompt": "line art drawing {prompt} . professional, sleek, modern, minimalist, graphic, line art, vector graphics",
+         "negative_prompt": "anime, photorealistic, 35mm film, deformed, glitch, blurry, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, mutated, realism, realistic, impressionism, expressionism, oil, acrylic",
+     }
+ ]
+
+ styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
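+
+ # Note (editorial): styles maps each display name to a (prompt template, negative
+ # prompt) pair; app.apply_style substitutes the user prompt into the template, e.g.
+ # styles["Enhance"][0].replace("{prompt}", "a man img") returns
+ # "breathtaking a man img . award-winning, professional, highly detailed".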