bbcmc camenduru committed on
Commit
bb5f923
0 Parent(s):

Duplicate from camenduru/AnimateDiff


Co-authored-by: camenduru <camenduru@users.noreply.huggingface.co>

Files changed (40)
  1. .gitattributes +35 -0
  2. CounterfeitV30_v30.safetensors +3 -0
  3. FilmVelvia2.safetensors +3 -0
  4. Pyramid lora_Ghibli_n3.safetensors +3 -0
  5. StableDiffusion/README.md +203 -0
  6. StableDiffusion/feature_extractor/preprocessor_config.json +20 -0
  7. StableDiffusion/model_index.json +32 -0
  8. StableDiffusion/safety_checker/config.json +175 -0
  9. StableDiffusion/safety_checker/pytorch_model.bin +3 -0
  10. StableDiffusion/scheduler/scheduler_config.json +13 -0
  11. StableDiffusion/text_encoder/config.json +25 -0
  12. StableDiffusion/text_encoder/pytorch_model.bin +3 -0
  13. StableDiffusion/tokenizer/merges.txt +0 -0
  14. StableDiffusion/tokenizer/special_tokens_map.json +24 -0
  15. StableDiffusion/tokenizer/tokenizer_config.json +34 -0
  16. StableDiffusion/tokenizer/vocab.json +0 -0
  17. StableDiffusion/unet/config.json +37 -0
  18. StableDiffusion/unet/diffusion_pytorch_model.bin +3 -0
  19. StableDiffusion/vae/config.json +30 -0
  20. StableDiffusion/vae/diffusion_pytorch_model.bin +3 -0
  21. TUSUN.safetensors +3 -0
  22. lyriel_v16.safetensors +3 -0
  23. majicmixRealistic_v5Preview.safetensors +3 -0
  24. mm_sd_v14.ckpt +3 -0
  25. mm_sd_v15.ckpt +3 -0
  26. mm_sd_v15_v2.ckpt +3 -0
  27. moonfilm_filmGrain10.safetensors +3 -0
  28. moonfilm_reality20.safetensors +3 -0
  29. rcnzCartoon3d_v10.safetensors +3 -0
  30. realisticVisionV20_v20.safetensors +3 -0
  31. realisticVisionV40_v20Novae.safetensors +3 -0
  32. toonyou_beta3.safetensors +3 -0
  33. v2_lora_PanDown.ckpt +3 -0
  34. v2_lora_PanLeft.ckpt +3 -0
  35. v2_lora_PanRight.ckpt +3 -0
  36. v2_lora_PanUp.ckpt +3 -0
  37. v2_lora_RollingAnticlockwise.ckpt +3 -0
  38. v2_lora_RollingClockwise.ckpt +3 -0
  39. v2_lora_ZoomIn.ckpt +3 -0
  40. v2_lora_ZoomOut.ckpt +3 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
CounterfeitV30_v30.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbfba64e662370f59d4aa2aa69bf16749fce93846ccce20506aee5df01169859
+ size 4244124028
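
Note: every binary in this commit is checked in as a Git LFS pointer like the one above; the pointer records only the payload's sha256 and byte size. A minimal sketch of verifying a downloaded payload against its pointer (the file name and values are copied from this diff; adapt as needed):

```py
import hashlib

def verify_lfs_payload(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a file on disk against the sha256/size from its LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        # Stream in 1 MiB chunks so multi-GB checkpoints never sit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Values copied from the pointer above.
print(verify_lfs_payload(
    "CounterfeitV30_v30.safetensors",
    "cbfba64e662370f59d4aa2aa69bf16749fce93846ccce20506aee5df01169859",
    4244124028,
))
```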
FilmVelvia2.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:803f3fa8d735fe885f8d6d0cb79f6fc1a033b18ab05e0dd020c050d7423ebda2
+ size 151108832
Pyramid lora_Ghibli_n3.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84a3c2cb0cfcaf0553512ca3c0e0b01cf420acaaf763388851749b3fb6a287d3
+ size 151108832
StableDiffusion/README.md ADDED
@@ -0,0 +1,203 @@
+ ---
+ license: creativeml-openrail-m
+ tags:
+ - stable-diffusion
+ - stable-diffusion-diffusers
+ - text-to-image
+ inference: false
+ extra_gated_prompt: |-
+   This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
+   1. You can't use the model to deliberately produce or share illegal or harmful outputs or content.
+   2. CompVis claims no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license.
+   3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully).
+   Please read the full license here: https://huggingface.co/spaces/CompVis/stable-diffusion-license
+
+   By clicking on "Access repository" below, you accept that your *contact information* (email address and username) can be shared with the model authors as well.
+
+ extra_gated_fields:
+   I have read the License and agree with its terms: checkbox
+ ---
+
+ # Stable Diffusion v1-5 Model Card
+
+ Stable Diffusion is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input.
+ For more information about how Stable Diffusion functions, please have a look at [🤗's Stable Diffusion blog](https://huggingface.co/blog/stable_diffusion).
+
+ The **Stable-Diffusion-v1-5** checkpoint was initialized with the weights of the [Stable-Diffusion-v1-2](https://huggingface.co/CompVis/stable-diffusion-v1-2)
+ checkpoint and subsequently fine-tuned for 595k steps at resolution 512x512 on "laion-aesthetics v2 5+", with 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
+
+ You can use this both with the [🧨Diffusers library](https://github.com/huggingface/diffusers) and the [RunwayML GitHub repository](https://github.com/runwayml/stable-diffusion).
+
+ ### Diffusers
+ ```py
+ import torch
+ from diffusers import StableDiffusionPipeline
+
+ model_id = "runwayml/stable-diffusion-v1-5"
+ device = "cuda"  # or "cpu"
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16")
+ pipe = pipe.to(device)
+
+ prompt = "a photo of an astronaut riding a horse on mars"
+ image = pipe(prompt).images[0]
+
+ image.save("astronaut_rides_horse.png")
+ ```
+ For more detailed instructions, use-cases, and examples in JAX, follow the instructions [here](https://github.com/huggingface/diffusers#text-to-image-generation-with-stable-diffusion).
+
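+ The same pipeline folder is also vendored in this repository under `StableDiffusion/`, so the call works against a local copy too. A minimal sketch, assuming the repo has been cloned with its LFS payloads resolved:
+
+ ```py
+ from diffusers import StableDiffusionPipeline
+
+ # model_index.json in the folder tells Diffusers which class to load
+ # for each component (unet, vae, text_encoder, tokenizer, ...).
+ pipe = StableDiffusionPipeline.from_pretrained("./StableDiffusion")
+ ```
+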
+ ### Original GitHub Repository
+
+ 1. Download the weights [v1-5-pruned-emaonly.ckpt](https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt)
+ 2. Follow the instructions [here](https://github.com/runwayml/stable-diffusion).
+
+ ## Model Details
+ - **Developed by:** Robin Rombach, Patrick Esser
+ - **Model type:** Diffusion-based text-to-image generation model
+ - **Language(s):** English
+ - **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying out in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based.
+ - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487).
+ - **Resources for more information:** [GitHub Repository](https://github.com/CompVis/stable-diffusion), [Paper](https://arxiv.org/abs/2112.10752).
+ - **Cite as:**
+
+       @InProceedings{Rombach_2022_CVPR,
+           author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
+           title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
+           booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+           month     = {June},
+           year      = {2022},
+           pages     = {10684-10695}
+       }
+
+ # Uses
+
+ ## Direct Use
+ The model is intended for research purposes only. Possible research areas and
+ tasks include
+
+ - Safe deployment of models which have the potential to generate harmful content.
+ - Probing and understanding the limitations and biases of generative models.
+ - Generation of artworks and use in design and other artistic processes.
+ - Applications in educational or creative tools.
+ - Research on generative models.
+
+ Excluded uses are described below.
+
+ ### Misuse, Malicious Use, and Out-of-Scope Use
+ _Note: This section is taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), but applies in the same way to Stable Diffusion v1_.
+
+ The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
+
+ #### Out-of-Scope Use
+ The model was not trained to produce factual or true representations of people or events, so using the model to generate such content is out-of-scope for its abilities.
+
+ #### Misuse and Malicious Use
+ Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:
+
+ - Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
+ - Intentionally promoting or propagating discriminatory content or harmful stereotypes.
+ - Impersonating individuals without their consent.
+ - Sexual content without consent of the people who might see it.
+ - Mis- and disinformation.
+ - Representations of egregious violence and gore.
+ - Sharing of copyrighted or licensed material in violation of its terms of use.
+ - Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.
+
+ ## Limitations and Bias
+
+ ### Limitations
+
+ - The model does not achieve perfect photorealism.
+ - The model cannot render legible text.
+ - The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”.
+ - Faces and people in general may not be generated properly.
+ - The model was trained mainly with English captions and will not work as well in other languages.
+ - The autoencoding part of the model is lossy.
+ - The model was trained on the large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult material and is not fit for product use without additional safety mechanisms and considerations.
+ - No additional measures were used to deduplicate the dataset. As a result, we observe some degree of memorization for images that are duplicated in the training data. The training data can be searched at [https://rom1504.github.io/clip-retrieval/](https://rom1504.github.io/clip-retrieval/) to possibly assist in the detection of memorized images.
+
+ ### Bias
+
+ While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
+ Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
+ which consists of images that are primarily limited to English descriptions.
+ Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
+ This affects the overall output of the model, as white and western cultures are often set as the default. Further, the
+ ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
+
+ ### Safety Module
+
+ The intended use of this model is with the [Safety Checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) in Diffusers.
+ This checker works by screening model outputs against known hard-coded NSFW concepts.
+ The concepts are intentionally hidden to reduce the likelihood of reverse-engineering this filter.
+ Specifically, the checker compares the class probability of harmful concepts in the embedding space of the `CLIPTextModel` *after generation* of the images.
+ The concepts are passed into the model with the generated image and compared to a hand-engineered weight for each NSFW concept.
+
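+ In Diffusers the checker runs automatically after each generation and reports its verdict on the pipeline output. A minimal sketch of inspecting it (continuing the `pipe`/`prompt` example above):
+
+ ```py
+ result = pipe(prompt)
+ image = result.images[0]
+
+ # nsfw_content_detected is a per-image list of booleans set by the
+ # safety checker; flagged images are replaced with black images.
+ if result.nsfw_content_detected[0]:
+     print("Safety checker flagged this image.")
+ ```
+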
+ ## Training
+
+ **Training Data**
+ The model developers used the following dataset for training the model:
+
+ - LAION-2B (en) and subsets thereof (see next section)
+
+ **Training Procedure**
+ Stable Diffusion v1-5 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training,
+
+ - Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4 (see the sketch after this list).
+ - Text prompts are encoded through a ViT-L/14 text-encoder.
+ - The non-pooled output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention.
+ - The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet.
+
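+ A minimal sketch of the shape bookkeeping in the first bullet, using the VAE shipped in this repo (f = 8, so a 512x512x3 image becomes a 64x64x4 latent):
+
+ ```py
+ import torch
+ from diffusers import AutoencoderKL
+
+ vae = AutoencoderKL.from_pretrained("./StableDiffusion", subfolder="vae")
+
+ image = torch.randn(1, 3, 512, 512)  # dummy N x C x H x W batch
+ with torch.no_grad():
+     latents = vae.encode(image).latent_dist.sample()
+ print(latents.shape)  # torch.Size([1, 4, 64, 64]), i.e. H/8 x W/8 x 4
+ ```
+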
+ We currently provide five checkpoints, which were trained as follows.
+ - [`stable-diffusion-v1-1`](https://huggingface.co/CompVis/stable-diffusion-v1-1): 237,000 steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en).
+   194,000 steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`).
+ - [`stable-diffusion-v1-2`](https://huggingface.co/CompVis/stable-diffusion-v1-2): Resumed from `stable-diffusion-v1-1`.
+   515,000 steps at resolution `512x512` on "laion-improved-aesthetics" (a subset of laion2B-en,
+   filtered to images with an original size `>= 512x512`, estimated aesthetics score `> 5.0`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the LAION-5B metadata, the aesthetics score is estimated using an [improved aesthetics estimator](https://github.com/christophschuhmann/improved-aesthetic-predictor)).
+ - [`stable-diffusion-v1-3`](https://huggingface.co/CompVis/stable-diffusion-v1-3): Resumed from `stable-diffusion-v1-2`. 195,000 steps at resolution `512x512` on "laion-improved-aesthetics" and 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
+ - [`stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4): Resumed from `stable-diffusion-v1-2`. 225,000 steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
+ - [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5): Resumed from `stable-diffusion-v1-2`. 595,000 steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
+
+ - **Hardware:** 32 x 8 x A100 GPUs
+ - **Optimizer:** AdamW
+ - **Gradient Accumulations:** 2
+ - **Batch:** 32 x 8 x 2 x 4 = 2048
+ - **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant
+
+ ## Evaluation Results
+ Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
+ 5.0, 6.0, 7.0, 8.0) and 50 PNDM/PLMS sampling
+ steps show the relative improvements of the checkpoints:
+
+ ![pareto](https://huggingface.co/CompVis/stable-diffusion/resolve/main/v1-1-to-v1-5.png)
+
+ Evaluated using 50 PLMS steps and 10,000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores.
+
+ ## Environmental Impact
+
+ **Stable Diffusion v1** **Estimated Emissions**
+ Based on the information below, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact.
+
+ - **Hardware Type:** A100 PCIe 40GB
+ - **Hours used:** 150,000
+ - **Cloud Provider:** AWS
+ - **Compute Region:** US-east
+ - **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 11,250 kg CO2 eq.
+
+ ## Citation
+
+ ```bibtex
+ @InProceedings{Rombach_2022_CVPR,
+     author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
+     title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
+     booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+     month     = {June},
+     year      = {2022},
+     pages     = {10684-10695}
+ }
+ ```
+
+ *This model card was written by: Robin Rombach and Patrick Esser and is based on the [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
StableDiffusion/feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "crop_size": 224,
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "CLIPFeatureExtractor",
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "size": 224
+ }
StableDiffusion/model_index.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "StableDiffusionPipeline",
+   "_diffusers_version": "0.6.0",
+   "feature_extractor": [
+     "transformers",
+     "CLIPFeatureExtractor"
+   ],
+   "safety_checker": [
+     "stable_diffusion",
+     "StableDiffusionSafetyChecker"
+   ],
+   "scheduler": [
+     "diffusers",
+     "PNDMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
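
This model_index.json is what the generic Diffusers loader reads to assemble the pipeline: each entry names the library and class to instantiate from the matching subfolder. A minimal sketch, assuming a local copy of the folder:

```py
from diffusers import DiffusionPipeline

# Reads model_index.json, sees _class_name = StableDiffusionPipeline,
# and loads scheduler, unet, vae, text_encoder, tokenizer, etc. from
# their respective subfolders.
pipe = DiffusionPipeline.from_pretrained("./StableDiffusion")
print(type(pipe).__name__)  # StableDiffusionPipeline
```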
StableDiffusion/safety_checker/config.json ADDED
@@ -0,0 +1,175 @@
+ {
+   "_commit_hash": null,
+   "_name_or_path": "/home/patrick/stable-diffusion-v1-5/safety_checker",
+   "architectures": [
+     "StableDiffusionSafetyChecker"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 768,
+   "text_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 77,
+     "min_length": 0,
+     "model_type": "clip_text_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.22.0.dev0",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "vocab_size": 49408
+   },
+   "text_config_dict": {
+     "hidden_size": 768,
+     "intermediate_size": 3072,
+     "num_attention_heads": 12,
+     "num_hidden_layers": 12
+   },
+   "torch_dtype": "float16",
+   "transformers_version": null,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "clip_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 24,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.22.0.dev0",
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "vision_config_dict": {
+     "hidden_size": 1024,
+     "intermediate_size": 4096,
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "patch_size": 14
+   }
+ }
StableDiffusion/safety_checker/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d37ca6e57ace94e4c2f03ed0f67b6dc83e1ef1160892074917aa68b28e2afc1
+ size 608098599
StableDiffusion/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_class_name": "PNDMScheduler",
+   "_diffusers_version": "0.6.0",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "num_train_timesteps": 1000,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "trained_betas": null,
+   "clip_sample": false
+ }
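
These values define the noise schedule: scaled-linear betas from 0.00085 to 0.012 over 1000 training timesteps, stepped through by a PNDM sampler at inference. A minimal sketch of instantiating the scheduler from this config (local path assumed):

```py
from diffusers import PNDMScheduler

scheduler = PNDMScheduler.from_pretrained("./StableDiffusion", subfolder="scheduler")
scheduler.set_timesteps(50)  # 50 inference steps, as in the README's evaluations
print(scheduler.timesteps[:5])
```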
StableDiffusion/text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "/home/patrick/stable-diffusion-v1-5/text_encoder",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "projection_dim": 768,
+   "torch_dtype": "float16",
+   "transformers_version": "4.22.0.dev0",
+   "vocab_size": 49408
+ }
StableDiffusion/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88bd85efb0f84e70521633f578715afb2873db4f2615fdfb1f66e99934715865
+ size 246184375
StableDiffusion/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
StableDiffusion/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
StableDiffusion/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "name_or_path": "/home/patrick/stable-diffusion-v1-5/tokenizer",
+   "pad_token": "<|endoftext|>",
+   "special_tokens_map_file": "./special_tokens_map.json",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
StableDiffusion/tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
StableDiffusion/unet/config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.6.0",
+   "_name_or_path": "/home/patrick/stable-diffusion-v1-5/unet",
+   "act_fn": "silu",
+   "attention_head_dim": 8,
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "cross_attention_dim": 768,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_scale_factor": 1,
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "out_channels": 4,
+   "sample_size": 64,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ]
+ }
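
Two entries in this config pin down how the UNet plugs into the rest of the pipeline: `cross_attention_dim: 768` matches the CLIP text encoder's hidden size, and `sample_size: 64` is the latent resolution for 512x512 pixel images (512 / 8). A minimal sketch of one denoising call with dummy inputs (local path assumed):

```py
import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("./StableDiffusion", subfolder="unet")

latents = torch.randn(1, 4, 64, 64)   # in_channels=4, sample_size=64
text_emb = torch.randn(1, 77, 768)    # 77 CLIP tokens x cross_attention_dim=768
with torch.no_grad():
    noise_pred = unet(latents, timestep=10, encoder_hidden_states=text_emb).sample
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64]): predicted noise in latent space
```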
StableDiffusion/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5019a4fbb455dd9b75192afc3ecf8a8ec875e83812fd51029d2e19277edddebc
+ size 1719312805
StableDiffusion/vae/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.6.0",
+   "_name_or_path": "/home/patrick/stable-diffusion-v1-5/vae",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 256,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
StableDiffusion/vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51c8904bc921e1e6f354b5fa8e99a1c82ead2f0540114de21557b8abfbb24ad0
+ size 167399505
TUSUN.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:549f98880c2dcfe7f6219a89a1b841fbdff832a8b5f7eeefeaa9083c4a29da1c
+ size 188857736
lyriel_v16.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec6f68ea6388951d53d6c8178c22aecfd1b4fedfe31f5a5814ddd7638c4eff37
+ size 2132625432
majicmixRealistic_v5Preview.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a38fa861a24f4f4c6e0f68289101e645dd9ca1e93e1049cc8a4f2a77513fad52
+ size 2400040290
mm_sd_v14.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa7fd8a200a89031edd84487e2a757c5315460eca528fa70d4b3885c399bffd5
+ size 1672133921
mm_sd_v15.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf16ea656cb16124990c8e2c70a29c793f9841f3a2223073fac8bd89ebd9b69a
+ size 1672133921
mm_sd_v15_v2.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69ed0f5fef82b110aca51bcab73b21104242bc65d6ab4b8b2a2a94d31cad1bf0
+ size 1817888431
moonfilm_filmGrain10.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88ea5a84a3d44bf2d2135d7b4cda7d83885aef6fc08bf1877d16e8731ea0a5c8
+ size 5838304246
moonfilm_reality20.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4a6146af2191742b7b88769b42b7a36294717523bf23aef5306907ea88a1a75
+ size 5816955842
rcnzCartoon3d_v10.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6b4c0392d7486bfa4fd1a31c7b7d2679f743f8ea8d9f219c82b5c33db31ddb9
+ size 2132625644
realisticVisionV20_v20.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0d1994c73d784a17a5b335ae8bda02dcc8dd2fc5f5dbf55169d5aab385e53f2
+ size 2132650523
realisticVisionV40_v20Novae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0d1994c73d784a17a5b335ae8bda02dcc8dd2fc5f5dbf55169d5aab385e53f2
+ size 2132650523
toonyou_beta3.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52768d2bc4e73af920df3466d0806d9a40727dab0869fc7aa2f3bc82214bb071
+ size 2132626252
v2_lora_PanDown.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09e4d5448aba4ea51b3bcd4b5d2b058ed4b47bb72d94d8c05a3ccce3368db6d9
+ size 77474499
v2_lora_PanLeft.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed79025f8bea018c8925f43b6304a27e462335b6ec5e6f8a222c2726153844b3
+ size 77474499
v2_lora_PanRight.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4eb9154623c628c76dbd83109f125617c985490fec36ddca5464eb61ac7f6d5
+ size 77474499
v2_lora_PanUp.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0ee2f181fc69d7fe26e013ad5cfea11f25cb9f5e8fded3c9942b61803cd6c3d
+ size 77474499
v2_lora_RollingAnticlockwise.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ae6cbc81044895243bba9a64df9666db763a52acfd8e496c490af84e812748a
+ size 77474499
v2_lora_RollingClockwise.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:361b1af8500d7fd09c2f884fac5dc0397a4323bae8fab5233443de5383d13630
+ size 77474499
v2_lora_ZoomIn.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70ce8b9057b173b9249c48aca5d66c8aa1d8aaa040fda394e50e37f3e278195e
+ size 77474499
v2_lora_ZoomOut.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4172fb2d36410ef638ae0e29d604b66c11ee44b94db9c7cc5ee34d7f865c55d9
+ size 77474499