patrickvonplaten committed
Commit 812f2ce
Parent: a5f5b19

correct diffusers

feature_extractor/preprocessor_config.json CHANGED
@@ -14,7 +14,7 @@
     0.4578275,
     0.40821073
   ],
-  "image_processor_type": "CLIPFeatureExtractor",
+  "image_processor_type": "CLIPImageProcessor",
   "image_std": [
     0.26862954,
     0.26130258,
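
Context for this change: transformers renamed CLIPFeatureExtractor to CLIPImageProcessor (the old name survives only as a deprecated alias), so the config now records the new class name. A minimal loading sketch, assuming it is run from the repository root:

from transformers import CLIPImageProcessor

# Reads feature_extractor/preprocessor_config.json; "image_processor_type"
# tells transformers which class to instantiate.
image_processor = CLIPImageProcessor.from_pretrained("./feature_extractor")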
model_index.json CHANGED
@@ -3,7 +3,7 @@
   "_diffusers_version": "0.12.0.dev0",
   "feature_extractor": [
     "transformers",
-    "CLIPFeatureExtractor"
+    "CLIPImageProcessor"
   ],
   "requires_safety_checker": true,
   "safety_checker": [
safety_checker/config.json CHANGED
@@ -80,7 +80,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.26.0.dev0",
+    "transformers_version": "4.25.1",
     "typical_p": 1.0,
     "use_bfloat16": false,
     "vocab_size": 49408
@@ -167,7 +167,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.26.0.dev0",
+    "transformers_version": "4.25.1",
     "typical_p": 1.0,
     "use_bfloat16": false
   },
text_encoder/config.json CHANGED
@@ -20,6 +20,6 @@
   "pad_token_id": 1,
   "projection_dim": 768,
   "torch_dtype": "float32",
-  "transformers_version": "4.26.0.dev0",
+  "transformers_version": "4.25.1",
   "vocab_size": 49408
 }
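
The transformers_version field is informational metadata: it records the library version the config was saved with, and this change pins it to the stable 4.25.1 release instead of a .dev0 build. A hypothetical compatibility check (not a diffusers API, just a sketch against this repo's text_encoder config):

import json

import transformers
from packaging import version

with open("text_encoder/config.json") as f:
    saved = version.parse(json.load(f)["transformers_version"])

installed = version.parse(transformers.__version__)
if installed < saved:
    print(f"installed transformers {installed} predates the {saved} this model was saved with")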
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec2d0afd40d6f48684144806af0b70bbbbbe8b4ad22b8044799ceed6384449c5
-size 143486976
+oid sha256:97c4985fb9950a5670d12a92347d61b97fab0b538df076caadac5a6ae9586a96
+size 3438366373
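
The weights file is stored with Git LFS, so the diff shows only the pointer: the sha256 and byte size of the actual payload (the replacement UNet weights are ~3.4 GB per the new size field). A verification sketch for a downloaded copy, assuming it is run from the repository root:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks to avoid loading ~3.4 GB into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Compare against the oid recorded in the LFS pointer above.
assert sha256_of("unet/diffusion_pytorch_model.bin") == \
    "97c4985fb9950a5670d12a92347d61b97fab0b538df076caadac5a6ae9586a96"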
v1-inference.yaml ADDED
@@ -0,0 +1,70 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
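
This added file is the standard CompVis Stable Diffusion v1 inference config; the original codebase reads it with OmegaConf and builds the model through ldm.util.instantiate_from_config. A loading sketch, assuming the CompVis ldm package is importable (it is not part of diffusers):

from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("v1-inference.yaml")
model = instantiate_from_config(config.model)  # builds the LatentDiffusion module
# Checkpoint weights are loaded separately, e.g. via model.load_state_dict(...).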