Synchronizing local compiler cache.
- .gitattributes +2 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.22/inference/mistral/mistralai/Mistral-7B-Instruct-v0.3/1ccf5513f809a1c9b8e8.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.22/inference/mistral/mistralai/Mistral-7B-Instruct-v0.3/d9d23b589ca43e531fa2.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/gpt2/gpt2/d5e93094d604b84cb59a.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/HuggingFaceTB/cosmo-1b/83c64ad31c0699e3053e.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/HuggingFaceTB/cosmo-1b/9b7d5605b2dff8357fec.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/HuggingFaceTB/cosmo-1b/c16f42c4d7e1ac059eaf.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/meta-Llama/Llama-2-7b-chat-hf/413432928afdb7aad6db.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/meta-Llama/Meta-Llama-3-8B/2b4c60684eea0835c2f6.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/princeton-nlp/Sheared-LLaMA-1.3B/4ada30f8bc0a03d8ee0f.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/mistral/optimum/mistral-1.1b-testing/3f196a5a5beadf2af838.json +1 -0
- neuronxcc-2.13.66.0+6dfecc895/MODULE_3ad368cca4ed8f043ee1+2c2d707e/compile_flags.txt +1 -0
- neuronxcc-2.13.66.0+6dfecc895/MODULE_3ad368cca4ed8f043ee1+2c2d707e/model.hlo_module.pb +3 -0
- neuronxcc-2.13.66.0+6dfecc895/MODULE_3ad368cca4ed8f043ee1+2c2d707e/model.neff +3 -0
- neuronxcc-2.13.66.0+6dfecc895/MODULE_72a51ff433ca11ac031b+2c2d707e/compile_flags.txt +1 -0
- neuronxcc-2.13.66.0+6dfecc895/MODULE_72a51ff433ca11ac031b+2c2d707e/model.hlo_module.pb +3 -0
- neuronxcc-2.13.66.0+6dfecc895/MODULE_72a51ff433ca11ac031b+2c2d707e/model.neff +3 -0
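(For context: a diff like this is what Optimum Neuron's cache synchronization pushes to a Hub cache repository. A minimal sketch of triggering the same upload from a Neuron build host, assuming optimum-neuron is installed, that `optimum-cli neuron cache synchronize` is the relevant command, and that `CUSTOM_CACHE_REPO` is the environment variable it reads for the target repo; the repo ID below is a placeholder.)

```python
# Sketch: push the local Neuron compiler cache to a Hub cache repo.
# Assumes optimum-neuron is installed and `optimum-cli` is on PATH;
# the repo ID is a hypothetical placeholder.
import os
import subprocess

# Point the toolchain at the target cache repository (assumption:
# CUSTOM_CACHE_REPO is the env var optimum-neuron reads for this).
os.environ["CUSTOM_CACHE_REPO"] = "my-org/optimum-neuron-cache"

# Upload locally compiled NEFF/HLO artifacts and registry JSONs.
subprocess.run(["optimum-cli", "neuron", "cache", "synchronize"], check=True)
```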
.gitattributes
CHANGED
@@ -3092,3 +3092,5 @@ neuronxcc-2.13.66.0+6dfecc895/MODULE_5207f7054fee18b4a110+2c2d707e/model.neff filter=lfs diff=lfs merge=lfs -text
 neuronxcc-2.13.66.0+6dfecc895/MODULE_b34e819f9371decdc13f+2c2d707e/model.neff filter=lfs diff=lfs merge=lfs -text
 neuronxcc-2.13.66.0+6dfecc895/MODULE_44846e9c6f250ca8cfb3+2c2d707e/model.neff filter=lfs diff=lfs merge=lfs -text
 neuronxcc-2.13.66.0+6dfecc895/MODULE_b99ac5e19f9eff20761d+2c2d707e/model.neff filter=lfs diff=lfs merge=lfs -text
+neuronxcc-2.13.66.0+6dfecc895/MODULE_3ad368cca4ed8f043ee1+2c2d707e/model.neff filter=lfs diff=lfs merge=lfs -text
+neuronxcc-2.13.66.0+6dfecc895/MODULE_72a51ff433ca11ac031b+2c2d707e/model.neff filter=lfs diff=lfs merge=lfs -text
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.22/inference/mistral/mistralai/Mistral-7B-Instruct-v0.3/1ccf5513f809a1c9b8e8.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["MistralForCausalLM"], "attention_dropout": 0.0, "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.02, "intermediate_size": 14336, "max_position_embeddings": 32768, "model_type": "mistral", "neuron": {"auto_cast_type": "fp16", "batch_size": 4, "checkpoint_id": "mistralai/Mistral-7B-Instruct-v0.3", "checkpoint_revision": "83e9aa141f2e28c82232fea5325f54edf17c43de", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 2, "sequence_length": 4096, "task": "text-generation"}, "num_attention_heads": 32, "num_hidden_layers": 32, "num_key_value_heads": 8, "rms_norm_eps": 1e-05, "rope_theta": 1000000.0, "sliding_window": null, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "use_cache": true, "vocab_size": 32768}
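(Each registry entry is the checkpoint's config plus a `neuron` section recording the compilation parameters: cast type, batch size, core count, sequence length. A hedged sketch of an export that would produce an entry like the one above, using the optimum-neuron `NeuronModelForCausalLM` API; the keyword names are taken from the `neuron` section and assumed to map one-to-one.)

```python
# Sketch: compile a checkpoint with the same knobs recorded in the
# registry entry above. The keyword set mirrors the "neuron" section
# and is an assumption about the export API.
from optimum.neuron import NeuronModelForCausalLM

model = NeuronModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.3",
    export=True,            # compile rather than load precompiled artifacts
    batch_size=4,
    sequence_length=4096,
    num_cores=2,
    auto_cast_type="fp16",
)
model.save_pretrained("mistral-7b-instruct-neuron")
```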
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.22/inference/mistral/mistralai/Mistral-7B-Instruct-v0.3/d9d23b589ca43e531fa2.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["MistralForCausalLM"], "attention_dropout": 0.0, "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.02, "intermediate_size": 14336, "max_position_embeddings": 32768, "model_type": "mistral", "neuron": {"auto_cast_type": "bf16", "batch_size": 4, "checkpoint_id": "mistralai/Mistral-7B-Instruct-v0.3", "checkpoint_revision": "83e9aa141f2e28c82232fea5325f54edf17c43de", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 2, "sequence_length": 4096, "task": "text-generation"}, "num_attention_heads": 32, "num_hidden_layers": 32, "num_key_value_heads": 8, "rms_norm_eps": 1e-05, "rope_theta": 1000000.0, "sliding_window": null, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "use_cache": true, "vocab_size": 32768}
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/gpt2/gpt2/d5e93094d604b84cb59a.json
ADDED
@@ -0,0 +1 @@
+{"activation_function": "gelu_new", "architectures": ["GPT2LMHeadModel"], "attn_pdrop": 0.1, "bos_token_id": 50256, "embd_pdrop": 0.1, "eos_token_id": 50256, "initializer_range": 0.02, "layer_norm_epsilon": 1e-05, "model_type": "gpt2", "n_ctx": 1024, "n_embd": 768, "n_head": 12, "n_inner": null, "n_layer": 12, "n_positions": 1024, "neuron": {"auto_cast_type": "fp16", "batch_size": 4, "checkpoint_id": "gpt2", "checkpoint_revision": "607a30d783dfa663caf39e06633721c8d4cfcd7e", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 2, "sequence_length": 1024, "task": "text-generation"}, "reorder_and_upcast_attn": false, "resid_pdrop": 0.1, "scale_attn_by_inverse_layer_idx": false, "scale_attn_weights": true, "summary_activation": null, "summary_first_dropout": 0.1, "summary_proj_to_labels": true, "summary_type": "cls_index", "summary_use_proj": true, "task_specific_params": {"text-generation": {"do_sample": true, "max_length": 50}}, "use_cache": true, "vocab_size": 50257}
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/HuggingFaceTB/cosmo-1b/83c64ad31c0699e3053e.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["LlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 8192, "max_position_embeddings": 2048, "model_type": "llama", "neuron": {"auto_cast_type": "fp16", "batch_size": 4, "checkpoint_id": "HuggingFaceTB/cosmo-1b", "checkpoint_revision": "0d5e341cfe835dffc81b6186f9715c094889f8ce", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 2, "sequence_length": 2048, "task": "text-generation"}, "num_attention_heads": 16, "num_hidden_layers": 24, "num_key_value_heads": 16, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 10000.0, "tie_word_embeddings": false, "torch_dtype": "float32", "use_cache": true, "vocab_size": 32000}
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/HuggingFaceTB/cosmo-1b/9b7d5605b2dff8357fec.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["LlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 8192, "max_position_embeddings": 2048, "mlp_bias": false, "model_type": "llama", "neuron": {"auto_cast_type": "f16", "batch_size": 4, "checkpoint_id": "HuggingFaceTB/cosmo-1b", "checkpoint_revision": "0d5e341cfe835dffc81b6186f9715c094889f8ce", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 2, "sequence_length": 4096, "task": "text-generation"}, "num_attention_heads": 16, "num_hidden_layers": 24, "num_key_value_heads": 16, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 10000.0, "tie_word_embeddings": false, "torch_dtype": "float32", "use_cache": true, "vocab_size": 32000}
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/HuggingFaceTB/cosmo-1b/c16f42c4d7e1ac059eaf.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["LlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 8192, "max_position_embeddings": 2048, "mlp_bias": false, "model_type": "llama", "neuron": {"auto_cast_type": "f16", "batch_size": 4, "checkpoint_id": "HuggingFaceTB/cosmo-1b", "checkpoint_revision": "0d5e341cfe835dffc81b6186f9715c094889f8ce", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 2, "sequence_length": 2048, "task": "text-generation"}, "num_attention_heads": 16, "num_hidden_layers": 24, "num_key_value_heads": 16, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 10000.0, "tie_word_embeddings": false, "torch_dtype": "float32", "use_cache": true, "vocab_size": 32000}
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/meta-Llama/Llama-2-7b-chat-hf/413432928afdb7aad6db.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["LlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.02, "intermediate_size": 11008, "max_position_embeddings": 4096, "model_type": "llama", "neuron": {"auto_cast_type": "fp16", "batch_size": 32, "checkpoint_id": "meta-Llama/Llama-2-7b-chat-hf", "checkpoint_revision": "f5db02db724555f92da89c216ac04704f23d4590", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 8, "sequence_length": 4096, "task": "text-generation"}, "num_attention_heads": 32, "num_hidden_layers": 32, "num_key_value_heads": 32, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 10000.0, "tie_word_embeddings": false, "torch_dtype": "float16", "use_cache": true, "vocab_size": 32000}
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/meta-Llama/Meta-Llama-3-8B/2b4c60684eea0835c2f6.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["LlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 128000, "eos_token_id": 128001, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.02, "intermediate_size": 14336, "max_position_embeddings": 8192, "model_type": "llama", "neuron": {"auto_cast_type": "fp16", "batch_size": 32, "checkpoint_id": "meta-Llama/Meta-Llama-3-8B", "checkpoint_revision": "62bd457b6fe961a42a631306577e622c83876cb6", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 8, "sequence_length": 4096, "task": "text-generation"}, "num_attention_heads": 32, "num_hidden_layers": 32, "num_key_value_heads": 8, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 500000.0, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "use_cache": true, "vocab_size": 128256}
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/llama/princeton-nlp/Sheared-LLaMA-1.3B/4ada30f8bc0a03d8ee0f.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["LlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 5504, "max_position_embeddings": 4096, "model_type": "llama", "neuron": {"auto_cast_type": "fp16", "batch_size": 4, "checkpoint_id": "princeton-nlp/Sheared-LLaMA-1.3B", "checkpoint_revision": "a4b76938edbf571ea7d7d9904861cbdca08809b4", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 2, "sequence_length": 4096, "task": "text-generation"}, "num_attention_heads": 16, "num_hidden_layers": 24, "num_key_value_heads": 16, "pad_token_id": 0, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 10000.0, "tie_word_embeddings": false, "torch_dtype": "float32", "use_cache": true, "vocab_size": 32000}
neuronxcc-2.13.66.0+6dfecc895/0_REGISTRY/0.0.23.dev0/inference/mistral/optimum/mistral-1.1b-testing/3f196a5a5beadf2af838.json
ADDED
@@ -0,0 +1 @@
+{"architectures": ["MistralForCausalLM"], "attention_dropout": 0.0, "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 5632, "max_position_embeddings": 32768, "model_type": "mistral", "neuron": {"auto_cast_type": "bf16", "batch_size": 4, "checkpoint_id": "optimum/mistral-1.1b-testing", "checkpoint_revision": "ce03bc8d47dbd2c173ff65f3a8de1325ba724195", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "num_cores": 2, "sequence_length": 4096, "task": "text-generation"}, "num_attention_heads": 32, "num_hidden_layers": 22, "num_key_value_heads": 4, "rms_norm_eps": 1e-05, "rope_theta": 1000000.0, "sliding_window": null, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "use_cache": true, "vocab_size": 32000}
neuronxcc-2.13.66.0+6dfecc895/MODULE_3ad368cca4ed8f043ee1+2c2d707e/compile_flags.txt
ADDED
@@ -0,0 +1 @@
+--model-type=transformer --auto-cast=none
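(compile_flags.txt records the neuronx-cc options the accompanying NEFF was built with. A sketch of replaying such a compilation by hand; `--model-type` and `--auto-cast` come from the file above, while the `--framework`, `--target`, and `--output` arguments are assumptions about the neuronx-cc CLI.)

```python
# Sketch: recompile the cached HLO module with the recorded flags.
# Assumes neuronx-cc is installed; --framework/--target/--output are
# assumed CLI arguments, --model-type/--auto-cast come from the file.
import subprocess

subprocess.run(
    [
        "neuronx-cc", "compile", "model.hlo_module.pb",
        "--framework", "XLA",
        "--target", "trn1",
        "--model-type", "transformer",
        "--auto-cast", "none",
        "--output", "model.neff",
    ],
    check=True,
)
```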
neuronxcc-2.13.66.0+6dfecc895/MODULE_3ad368cca4ed8f043ee1+2c2d707e/model.hlo_module.pb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecbd1cd70bfe8ada5b3fbce5a596f4e3095f943d5f36ae0a50d9bbd742f752a1
+size 285616
neuronxcc-2.13.66.0+6dfecc895/MODULE_3ad368cca4ed8f043ee1+2c2d707e/model.neff
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4286ec59ad165e79a2da15ac2b1b5695204a770ecd9c65e91b925d08fa522f92
+size 5213184
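(The .pb and .neff binaries live in Git LFS, so the repository itself stores only three-line pointer files like those above. A small sketch of parsing such a pointer into its fields, following the spec URL in the pointer's first line.)

```python
# Sketch: parse a Git LFS pointer file into its spec fields.
# Lines have the form "version <url>", "oid sha256:<hex>", "size <bytes>".
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            if line.strip():
                key, _, value = line.strip().partition(" ")
                fields[key] = value
    return fields

ptr = parse_lfs_pointer("model.neff")  # the pointer file, not the binary
print(ptr["oid"], ptr["size"])         # e.g. sha256:4286ec... 5213184
```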
neuronxcc-2.13.66.0+6dfecc895/MODULE_72a51ff433ca11ac031b+2c2d707e/compile_flags.txt
ADDED
@@ -0,0 +1 @@
+--model-type=transformer --auto-cast=none
neuronxcc-2.13.66.0+6dfecc895/MODULE_72a51ff433ca11ac031b+2c2d707e/model.hlo_module.pb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1783d8707707da8120c0a26bdf23cc5fed2e80ed1d8d6dc178ad4fbf8325b1e
+size 251670
neuronxcc-2.13.66.0+6dfecc895/MODULE_72a51ff433ca11ac031b+2c2d707e/model.neff
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11cf6e65844253f4ce3af15a109468c8e1f68cc86f3885f32b1dfcd224f7d8e1
+size 25723904