Synchronizing local compiler cache.
- neuronxcc-2.14.227.0+2d4f85be/0_REGISTRY/0.0.25.dev0/inference/stable-diffusion/stabilityai/stable-diffusion-xl-base-1.0/fc2537696b61a933633a.json +1 -0
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/model_index.json +2 -2
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/scheduler/scheduler_config.json +8 -3
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/text_encoder/config.json +2 -2
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/text_encoder/model.neuron +1 -1
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/text_encoder_2/config.json +2 -2
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/text_encoder_2/model.neuron +1 -1
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/tokenizer/special_tokens_map.json +1 -7
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/tokenizer_2/special_tokens_map.json +1 -7
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/unet/config.json +7 -2
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/unet/model.neuron +2 -2
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/vae_decoder/config.json +3 -3
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/vae_decoder/model.neuron +1 -1
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/vae_encoder/config.json +3 -3
- neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/vae_encoder/model.neuron +1 -1
neuronxcc-2.14.227.0+2d4f85be/0_REGISTRY/0.0.25.dev0/inference/stable-diffusion/stabilityai/stable-diffusion-xl-base-1.0/fc2537696b61a933633a.json
ADDED
@@ -0,0 +1 @@
+{"model_type": "stable-diffusion", "text_encoder": {"architectures": ["CLIPTextModel"], "attention_dropout": 0.0, "dropout": 0.0, "hidden_act": "quick_gelu", "hidden_size": 768, "initializer_factor": 1.0, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-05, "max_position_embeddings": 77, "model_type": "clip_text_model", "neuron": {"auto_cast": "matmul", "auto_cast_type": "bf16", "compiler_type": "neuronx-cc", "compiler_version": "2.14.227.0+2d4f85be", "dynamic_batch_size": false, "inline_weights_to_neff": false, "optlevel": "2", "output_attentions": false, "output_hidden_states": false, "static_batch_size": 1, "static_sequence_length": 77}, "num_attention_heads": 12, "num_hidden_layers": 12, "output_hidden_states": true, "task": "feature-extraction", "vocab_size": 49408}, "text_encoder_2": {"architectures": ["CLIPTextModelWithProjection"], "attention_dropout": 0.0, "dropout": 0.0, "hidden_act": "gelu", "hidden_size": 1280, "initializer_factor": 1.0, "initializer_range": 0.02, "intermediate_size": 5120, "layer_norm_eps": 1e-05, "max_position_embeddings": 77, "model_type": "clip_text_model", "neuron": {"auto_cast": "matmul", "auto_cast_type": "bf16", "compiler_type": "neuronx-cc", "compiler_version": "2.14.227.0+2d4f85be", "dynamic_batch_size": false, "inline_weights_to_neff": false, "optlevel": "2", "output_attentions": false, "output_hidden_states": false, "static_batch_size": 1, "static_sequence_length": 77}, "num_attention_heads": 20, "num_hidden_layers": 32, "output_hidden_states": true, "task": "feature-extraction", "vocab_size": 49408}, "unet": {"_class_name": "UNet2DConditionModel", "act_fn": "silu", "addition_embed_type": "text_time", "addition_embed_type_num_heads": 64, "addition_time_embed_dim": 256, "attention_head_dim": [5, 10, 20], "attention_type": "default", "block_out_channels": [320, 640, 1280], "center_input_sample": false, "class_embed_type": null, "class_embeddings_concat": false, "conv_in_kernel": 3, "conv_out_kernel": 3, "cross_attention_dim": 2048, "cross_attention_norm": null, "down_block_types": ["DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"], "downsample_padding": 1, "dropout": 0.0, "dual_cross_attention": false, "encoder_hid_dim": null, "encoder_hid_dim_type": null, "flip_sin_to_cos": true, "freq_shift": 0, "in_channels": 4, "layers_per_block": 2, "mid_block_only_cross_attention": null, "mid_block_scale_factor": 1, "mid_block_type": "UNetMidBlock2DCrossAttn", "neuron": {"auto_cast": "matmul", "auto_cast_type": "bf16", "compiler_type": "neuronx-cc", "compiler_version": "2.14.227.0+2d4f85be", "dynamic_batch_size": false, "inline_weights_to_neff": true, "optlevel": "2", "output_attentions": false, "output_hidden_states": false, "static_batch_size": 1, "static_height": 96, "static_num_channels": 4, "static_sequence_length": 77, "static_vae_scale_factor": 8, "static_width": 96}, "norm_eps": 1e-05, "norm_num_groups": 32, "num_attention_heads": null, "num_class_embeds": null, "only_cross_attention": false, "out_channels": 4, "projection_class_embeddings_input_dim": 2816, "resnet_out_scale_factor": 1.0, "resnet_skip_time_act": false, "resnet_time_scale_shift": "default", "reverse_transformer_layers_per_block": null, "task": "semantic-segmentation", "time_cond_proj_dim": null, "time_embedding_act_fn": null, "time_embedding_dim": null, "time_embedding_type": "positional", "timestep_post_act": null, "transformer_layers_per_block": [1, 2, 10], "up_block_types": ["CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"], "upcast_attention": null, "use_linear_projection": true}}
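For reference, the registry entry above records the compiler version, flags, and static input shapes this SDXL pipeline was compiled with. The following Python sketch is not part of the commit; it only illustrates, using the optimum-neuron export API and the settings recorded above, how a compilation with matching flags and shapes would typically resolve to this cache entry instead of re-running neuronx-cc:

# Hypothetical usage sketch: request the same compiler flags and static shapes that the
# registry entry records, so the cached Neuron artifacts are reused rather than recompiled.
from optimum.neuron import NeuronStableDiffusionXLPipeline

pipe = NeuronStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    export=True,            # compile (or fetch from the compiler cache) the Neuron artifacts
    auto_cast="matmul",     # "auto_cast" in the registry entry
    auto_cast_type="bf16",  # "auto_cast_type" in the registry entry
    batch_size=1,           # static_batch_size
    height=768,             # static_height (96) * static_vae_scale_factor (8)
    width=768,              # static_width (96) * static_vae_scale_factor (8)
)
pipe.save_pretrained("sdxl_neuron_768x768/")  # hypothetical output directory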
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/model_index.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "StableDiffusionXLImg2ImgPipeline",
   "_diffusers_version": "0.28.2",
-  "_name_or_path": "stabilityai/
+  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
   "feature_extractor": [
     null,
     null
@@ -14,7 +14,7 @@
   "requires_aesthetics_score": false,
   "scheduler": [
     "diffusers",
-    "
+    "EulerDiscreteScheduler"
   ],
   "text_encoder": [
     "transformers",
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/scheduler/scheduler_config.json
CHANGED
@@ -1,18 +1,23 @@
 {
-  "_class_name": "
+  "_class_name": "EulerDiscreteScheduler",
   "_diffusers_version": "0.28.2",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
   "clip_sample": false,
+  "final_sigmas_type": "zero",
   "interpolation_type": "linear",
   "num_train_timesteps": 1000,
   "prediction_type": "epsilon",
   "rescale_betas_zero_snr": false,
   "sample_max_value": 1.0,
   "set_alpha_to_one": false,
+  "sigma_max": null,
+  "sigma_min": null,
   "skip_prk_steps": true,
   "steps_offset": 1,
-  "timestep_spacing": "
-  "
+  "timestep_spacing": "leading",
+  "timestep_type": "discrete",
+  "trained_betas": null,
+  "use_karras_sigmas": false
 }
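Aside: the rewritten scheduler_config.json above is an ordinary diffusers scheduler config, so it can be instantiated directly once the module directory is available locally. A minimal sketch, assuming the cache folder has been downloaded to the current working directory:

# Load the EulerDiscreteScheduler described by the updated config (local path assumed).
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler.from_pretrained(
    "neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a",
    subfolder="scheduler",
)
print(scheduler.config.timestep_spacing)  # "leading", as set in the diff above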
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/text_encoder/config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--
+  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/text_encoder",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -45,6 +45,6 @@
   "task": "feature-extraction",
   "torch_dtype": "float16",
   "torchscript": true,
-  "transformers_version": "4.
+  "transformers_version": "4.43.2",
   "vocab_size": 49408
 }
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/text_encoder/model.neuron
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:668abeb8945e09e594f79f570903cb66e91de057f06b0fa8475596de643a27f6
 size 496513017
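The model.neuron artifacts are stored as Git LFS pointers, so the diff only carries the new sha256 oid and byte size. A small stdlib-only sketch (not part of the commit) for checking that a downloaded artifact matches its pointer; the local path is an assumption, the oid and size are the values from the pointer above:

import hashlib
import os

def matches_lfs_pointer(path, expected_oid, expected_size):
    # Hash the file in chunks and compare against the pointer's sha256 oid and byte size.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

print(matches_lfs_pointer(
    "MODULE_fc2537696b61a933633a/text_encoder/model.neuron",  # assumed local download path
    "668abeb8945e09e594f79f570903cb66e91de057f06b0fa8475596de643a27f6",
    496513017,
))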
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/text_encoder_2/config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--
+  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/text_encoder_2",
   "architectures": [
     "CLIPTextModelWithProjection"
   ],
@@ -45,6 +45,6 @@
   "task": "feature-extraction",
   "torch_dtype": "float16",
   "torchscript": true,
-  "transformers_version": "4.
+  "transformers_version": "4.43.2",
   "vocab_size": 49408
 }
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/text_encoder_2/model.neuron
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:afa6c98f468e2ded4618596f7728cb7d4825860999993ba654a9548940a5169c
 size 2803260907
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/tokenizer/special_tokens_map.json
CHANGED
@@ -13,13 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token":
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "<|endoftext|>",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
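Note that the pad token is now serialized as a bare string rather than an AddedToken-style object; transformers accepts both forms in special_tokens_map.json, so the loaded tokenizer ends up with the same pad token either way. A short sketch, assuming the cached tokenizer folder (with its vocab and merges files) is available locally:

# Both serializations resolve to the same pad token after loading (local path assumed).
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("MODULE_fc2537696b61a933633a/tokenizer")
print(tokenizer.pad_token)  # "<|endoftext|>"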
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/tokenizer_2/special_tokens_map.json
CHANGED
@@ -13,13 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token":
-    "content": "!",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "!",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/unet/config.json
CHANGED
@@ -2,7 +2,12 @@
   "_class_name": "UNet2DConditionModel",
   "_commit_hash": null,
   "_diffusers_version": "0.28.2",
-  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--
+  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/unet",
+  "_use_default_values": [
+    "attention_type",
+    "reverse_transformer_layers_per_block",
+    "dropout"
+  ],
   "act_fn": "silu",
   "addition_embed_type": "text_time",
   "addition_embed_type_num_heads": 64,
@@ -81,7 +86,7 @@
   "resnet_skip_time_act": false,
   "resnet_time_scale_shift": "default",
   "reverse_transformer_layers_per_block": null,
-  "sample_size":
+  "sample_size": 128,
   "task": "semantic-segmentation",
   "time_cond_proj_dim": null,
   "time_embedding_act_fn": null,
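For context on the restored value: the UNet's "sample_size" of 128 is a latent-space size, and with the VAE scale factor of 8 it corresponds to SDXL's default 1024x1024 output, while the static_height/static_width of 96 recorded in the registry entry means this particular module was traced for 768x768 generation. A tiny arithmetic sketch:

# Relating latent-space sizes to pixel resolutions (values taken from the configs above).
vae_scale_factor = 8       # static_vae_scale_factor in the registry entry
default_sample_size = 128  # "sample_size" restored in this diff
static_latent_size = 96    # static_height / static_width used for this compilation

print(default_sample_size * vae_scale_factor)  # 1024 -> SDXL's default resolution
print(static_latent_size * vae_scale_factor)   # 768  -> resolution this module was compiled for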
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/unet/model.neuron
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:64778fc67c2662e8fbc097037c3e250d8c1fe6affdb20bd1e8c3744800cc87e9
+size 4153904445
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/vae_decoder/config.json
CHANGED
@@ -2,10 +2,10 @@
   "_class_name": "AutoencoderKL",
   "_commit_hash": null,
   "_diffusers_version": "0.28.2",
-  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--
+  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/vae",
   "_use_default_values": [
-    "
-    "
+    "latents_mean",
+    "latents_std"
   ],
   "act_fn": "silu",
   "block_out_channels": [
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/vae_decoder/model.neuron
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:85f1841b76a420608d82c4206e7648a67df1bfa65fefed0e3f8d742ac96c1684
 size 445006387
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/vae_encoder/config.json
CHANGED
@@ -2,10 +2,10 @@
   "_class_name": "AutoencoderKL",
   "_commit_hash": null,
   "_diffusers_version": "0.28.2",
-  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--
+  "_name_or_path": "/home/runner/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/vae",
   "_use_default_values": [
-    "
-    "
+    "latents_mean",
+    "latents_std"
   ],
   "act_fn": "silu",
   "block_out_channels": [
neuronxcc-2.14.227.0+2d4f85be/MODULE_fc2537696b61a933633a/vae_encoder/model.neuron
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:556b07afd4332ccb2936f70157965da45383201507d370c7bc5860e6879859cf
 size 252422707