Datasets:

License:
File size: 4,050 Bytes
18b1fea
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# SDXL: 0.613, 0.5566, 0.54, 0.4162, 0.4042, 0.4596, 0.5374, 0.5286, 0.5038
# SD: 0.5396, 0.5707, 0.477, 0.4665, 0.5419, 0.4594, 0.4857, 0.4741, 0.4804

from diffusers import DiffusionPipeline
from huggingface_hub import upload_folder
from peft import LoraConfig
import argparse
import torch

from peft.utils import get_peft_model_state_dict
from diffusers.utils import convert_state_dict_to_diffusers
from diffusers.loaders import StableDiffusionXLLoraLoaderMixin, LoraLoaderMixin
from huggingface_hub import create_repo, upload_folder


# Maps each tiny test pipeline repo id to the Hub repo that stores its
# corresponding LoRA (PEFT-format) checkpoint. Also used to derive the
# local output directory name (the part after the final "/").
mapping = {
    "hf-internal-testing/tiny-sd-pipe": "hf-internal-testing/tiny-sd-lora-peft",
    "hf-internal-testing/tiny-sdxl-pipe": "hf-internal-testing/tiny-sdxl-lora-peft",
}


def load_pipeline(pipeline_id):
    """Load and return a diffusers pipeline for the given Hub repo id."""
    return DiffusionPipeline.from_pretrained(pipeline_id)


def get_lora_config():
    """Build the LoRA configs for the text encoder and the UNet.

    Returns:
        A ``(text_lora_config, unet_lora_config)`` tuple of ``LoraConfig``
        objects, both rank 4 with random (non-identity) LoRA init.
    """
    lora_rank = 4

    # Re-seed before each config so the random LoRA initialization
    # (init_lora_weights=False) is reproducible and identical per run.
    torch.manual_seed(0)
    text_cfg = LoraConfig(
        r=lora_rank,
        lora_alpha=lora_rank,
        init_lora_weights=False,
        target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
    )

    torch.manual_seed(0)
    unet_cfg = LoraConfig(
        r=lora_rank,
        lora_alpha=lora_rank,
        init_lora_weights=False,
        target_modules=["to_q", "to_k", "to_v", "to_out.0"],
    )

    return text_cfg, unet_cfg


def get_dummy_inputs():
    """Return fixed pipeline call kwargs for a short, deterministic generation."""
    # Seeded generator makes the sampled output reproducible across runs.
    return {
        "prompt": "A painting of a squirrel eating a burger",
        "num_inference_steps": 2,
        "guidance_scale": 6.0,
        "output_type": "np",
        "generator": torch.manual_seed(0),
    }


def run_inference(args):
    """Run LoRA-adapted inference on a tiny pipeline; optionally push the weights.

    Loads the pipeline named by ``args.pipeline_id``, attaches freshly
    initialized LoRA adapters to the text encoder(s) and the UNet, runs a
    short deterministic generation, and prints a 3x3 corner slice of the
    output image (the regression values recorded in the comments at the top
    of this file). When ``args.push_to_hub`` is set, the LoRA weights are
    serialized to a local directory and uploaded to the Hub repo given by
    ``mapping``.

    Args:
        args: Parsed CLI namespace with ``pipeline_id`` (str, a key of
            ``mapping``) and ``push_to_hub`` (bool) attributes.
    """
    pipe = load_pipeline(pipeline_id=args.pipeline_id)
    text_lora_config, unet_lora_config = get_lora_config()

    pipe.text_encoder.add_adapter(text_lora_config)
    pipe.unet.add_adapter(unet_lora_config)
    # SDXL pipelines carry a second text encoder; adapt it too when present.
    has_two_text_encoders = hasattr(pipe, "text_encoder_2")
    if has_two_text_encoders:
        pipe.text_encoder_2.add_adapter(text_lora_config)

    inputs = get_dummy_inputs()
    outputs = pipe(**inputs).images
    # Bottom-right 3x3 corner of the last channel: a cheap image fingerprint.
    predicted_slice = outputs[0, -3:, -3:, -1].flatten().tolist()
    print(", ".join([str(round(x, 4)) for x in predicted_slice]))

    if not args.push_to_hub:
        return

    # Collect the LoRA state dicts once, in diffusers' serialization format,
    # so a single save_lora_weights call covers both the SD and SDXL cases.
    save_kwargs = {
        "unet_lora_layers": convert_state_dict_to_diffusers(
            get_peft_model_state_dict(pipe.unet)
        ),
        "text_encoder_lora_layers": convert_state_dict_to_diffusers(
            get_peft_model_state_dict(pipe.text_encoder)
        ),
    }
    serialization_cls = LoraLoaderMixin
    if has_two_text_encoders:
        serialization_cls = StableDiffusionXLLoraLoaderMixin
        save_kwargs["text_encoder_2_lora_layers"] = convert_state_dict_to_diffusers(
            get_peft_model_state_dict(pipe.text_encoder_2)
        )

    # Local directory named after the target repo, e.g. "tiny-sd-lora-peft".
    output_dir = mapping[args.pipeline_id].split("/")[-1]
    serialization_cls.save_lora_weights(save_directory=output_dir, **save_kwargs)

    repo_id = create_repo(repo_id=mapping[args.pipeline_id], exist_ok=True).repo_id
    upload_folder(repo_id=repo_id, folder_path=output_dir)


if __name__ == "__main__":
    # CLI: pick one of the tiny test pipelines and optionally push the
    # generated LoRA checkpoint to the Hub.
    supported_pipelines = [
        "hf-internal-testing/tiny-sd-pipe",
        "hf-internal-testing/tiny-sdxl-pipe",
    ]
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pipeline_id",
        type=str,
        choices=supported_pipelines,
        default=supported_pipelines[0],
    )
    parser.add_argument("--push_to_hub", action="store_true")
    run_inference(parser.parse_args())