dacorvo (HF Staff) committed
Commit bad311c · verified · 1 Parent(s): 5e38709

Synchronizing local compiler cache.
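Each registry entry added below records the configuration a model was compiled with (checkpoint, batch size, sequence length, tensor-parallel degree, dtype), so that later exports with the same settings can reuse the cached artifacts. As a rough, hypothetical sketch — the keyword names here are assumptions and may differ between optimum-neuron releases — an export matching one of these entries would look roughly like this:

# Hypothetical sketch only: load a checkpoint listed in this commit with export
# parameters matching one of the cached entries (unsloth/Llama-3.2-1B at
# batch_size=1, sequence_length=4096, tp_degree=2), so compilation can be
# served from this cache instead of being redone locally.
# Keyword names are assumptions and may vary across optimum-neuron versions.
from optimum.neuron import NeuronModelForCausalLM

model = NeuronModelForCausalLM.from_pretrained(
    "unsloth/Llama-3.2-1B",
    export=True,                # compile on load, consulting the hub cache first
    batch_size=1,
    sequence_length=4096,
    tensor_parallel_size=2,     # assumed name for the tp_degree=2 setting
)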

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +9 -0
  2. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-2b-instruct/1002e526666aa6d374df.json +58 -0
  3. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-2b-instruct/415a488f8e9bfd810f69.json +58 -0
  4. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-2b-instruct/83949fddd59377cbb674.json +58 -0
  5. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-2b-instruct/a4e730448c44c446c2c5.json +58 -0
  6. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/08eeed134fa4e527271c.json +58 -0
  7. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/1fe296005f1eff947583.json +58 -0
  8. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/30dc5285b1aae437b520.json +58 -0
  9. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/64c646616b24b2a8d43c.json +58 -0
  10. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/9aed265427cc6cb86d4b.json +58 -0
  11. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/0740ab092d02484487fb.json +62 -0
  12. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/0fc67cd324a7c1a05100.json +62 -0
  13. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/226856c4c5cdfd69aa89.json +62 -0
  14. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/341a9cc68e1b4eded838.json +62 -0
  15. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/3ac9f00c63887961a784.json +62 -0
  16. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/8d70cd76e737aaa4eaa4.json +62 -0
  17. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/ad6ef9f317fb8e1ab4f1.json +62 -0
  18. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/lmsys/vicuna-7b-v1.5/e6eb0587815d37abaf03.json +56 -0
  19. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/lmsys/vicuna-7b-v1.5/eb93c62140353ba54657.json +56 -0
  20. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/princeton-nlp/Sheared-LLaMA-1.3B/8a8971a0da11451cb8a9.json +56 -0
  21. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/princeton-nlp/Sheared-LLaMA-1.3B/d0e265b870b2f9fc91c5.json +56 -0
  22. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/unsloth/Llama-3.2-1B/62b8172ee838a29e1e7f.json +63 -0
  23. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/unsloth/Llama-3.2-1B/f19511a53b988b95bb49.json +63 -0
  24. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Maverick-17B-128E-Instruct/115ac93cb9174db4e67f.json +190 -0
  25. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Maverick-17B-128E-Instruct/f70dea2be77b8d1dc8ed.json +190 -0
  26. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/07656ae2a159358e76ff.json +220 -0
  27. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/0b9d19926bec30ac4419.json +220 -0
  28. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/236b23417ad1c79fbb5f.json +220 -0
  29. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/2a96ee4639be3796f16b.json +220 -0
  30. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/621024dbf42a03b7babc.json +220 -0
  31. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/923fcd4cf259579b5e4a.json +220 -0
  32. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/a90ff1e995579ec8deee.json +220 -0
  33. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/c3450e1affaca20e05e3.json +220 -0
  34. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/d33cefe3ad2c77e0544b.json +220 -0
  35. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/f71e619c760aaf9e2888.json +220 -0
  36. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/phi3/microsoft/Phi-3-mini-4k-instruct/3558b5ac7259b6bcc01a.json +62 -0
  37. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/phi3/microsoft/phi-4/38f87915d107c55b7651.json +58 -0
  38. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/phi3/microsoft/phi-4/473a4f2462bcd8b3f136.json +58 -0
  39. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-0.5B/300b37dace1ce2c0b783.json +82 -0
  40. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-1.5B/8d982941157412579546.json +86 -0
  41. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-1.5B/dea81904d370c8b20332.json +86 -0
  42. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-14B/877be4240e4a459b2a14.json +105 -0
  43. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-14B/c05ba11ec3a01458a2e6.json +105 -0
  44. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-32B-Instruct/2e5ba8f801dbc7a16c3c.json +121 -0
  45. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-32B-Instruct/7182911a8d43e7187430.json +121 -0
  46. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-72B-Instruct/f3b6f76004dc3d143c7e.json +137 -0
  47. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-7B-Instruct/2ffd57bb17f3a35919c6.json +85 -0
  48. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-7B-Instruct/85aeb3e82bb9189fa256.json +85 -0
  49. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-7B-Instruct/b256ce0e46280fedadb4.json +85 -0
  50. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-7B-Instruct/bbe60ad043d0675f6bd9.json +85 -0
.gitattributes CHANGED
@@ -5262,3 +5262,12 @@ neuronxcc-2.21.18209.0+043b1bf7/MODULE_f9260d832dabcf299e0e+877608f3/model.neff
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_1b5caf61147adc2d934e+747527b0/model.neff filter=lfs diff=lfs merge=lfs -text
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_1b5caf61147adc2d934e+747527b0/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_58bfab3ea35f7cda10d3+877608f3/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_04def5b319953baacddd+a9d440f5/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_04def5b319953baacddd+a9d440f5/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_63b08aa574a103e133be+a9d440f5/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_63b08aa574a103e133be+a9d440f5/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_7585b7b81ecc283af772+a9d440f5/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_7585b7b81ecc283af772+a9d440f5/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_a89678b39464c33c1815+ed72d204/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_bae931052fc7117dae12+a9d440f5/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_bae931052fc7117dae12+a9d440f5/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-2b-instruct/1002e526666aa6d374df.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-2b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.015625,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "logits_scaling": 8.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 8,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-2b-instruct",
+ "checkpoint_revision": "bbc2aed595bd38bd770263dc3ab831db9794441d",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 8,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 5000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-2b-instruct/415a488f8e9bfd810f69.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-2b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.015625,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "logits_scaling": 8.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-2b-instruct",
+ "checkpoint_revision": "bbc2aed595bd38bd770263dc3ab831db9794441d",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 5000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-2b-instruct/83949fddd59377cbb674.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-2b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.015625,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "logits_scaling": 8.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-2b-instruct",
+ "checkpoint_revision": "bbc2aed595bd38bd770263dc3ab831db9794441d",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 5000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-2b-instruct/a4e730448c44c446c2c5.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-2b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.015625,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "logits_scaling": 8.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-2b-instruct",
+ "checkpoint_revision": "bbc2aed595bd38bd770263dc3ab831db9794441d",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 5000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/08eeed134fa4e527271c.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-8b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.0078125,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12800,
+ "logits_scaling": 16.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-8b-instruct",
+ "checkpoint_revision": "4009206d5fc95d2e65a7b7633e159d6e97e25d35",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/1fe296005f1eff947583.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-8b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.0078125,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12800,
+ "logits_scaling": 16.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-8b-instruct",
+ "checkpoint_revision": "4009206d5fc95d2e65a7b7633e159d6e97e25d35",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/30dc5285b1aae437b520.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-8b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.0078125,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12800,
+ "logits_scaling": 16.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 8,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-8b-instruct",
+ "checkpoint_revision": "4009206d5fc95d2e65a7b7633e159d6e97e25d35",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 8,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/64c646616b24b2a8d43c.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-8b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.0078125,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12800,
+ "logits_scaling": 16.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-8b-instruct",
+ "checkpoint_revision": "4009206d5fc95d2e65a7b7633e159d6e97e25d35",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/granite/ibm-granite/granite-3.1-8b-instruct/9aed265427cc6cb86d4b.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "ibm-granite/granite-3.1-8b-instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "GraniteForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.1,
+ "attention_multiplier": 0.0078125,
+ "embedding_multiplier": 12.0,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12800,
+ "logits_scaling": 16.0,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "granite",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "ibm-granite/granite-3.1-8b-instruct",
+ "checkpoint_revision": "4009206d5fc95d2e65a7b7633e159d6e97e25d35",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 8,
+ "residual_multiplier": 0.22,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000000.0,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "vocab_size": 49155
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/0740ab092d02484487fb.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "checkpoint_revision": "6a6f4aa4197940add57724a7707d069478df56b1",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 8.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/0fc67cd324a7c1a05100.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "checkpoint_revision": "6a6f4aa4197940add57724a7707d069478df56b1",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 8.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/226856c4c5cdfd69aa89.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "checkpoint_revision": "6a6f4aa4197940add57724a7707d069478df56b1",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 8.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/341a9cc68e1b4eded838.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 8,
+ "capacity_factor": null,
+ "checkpoint_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "checkpoint_revision": "6a6f4aa4197940add57724a7707d069478df56b1",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 8,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 8.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/3ac9f00c63887961a784.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 16,
+ "capacity_factor": null,
+ "checkpoint_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "checkpoint_revision": "6a6f4aa4197940add57724a7707d069478df56b1",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 16,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 8.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/8d70cd76e737aaa4eaa4.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "checkpoint_revision": "6a6f4aa4197940add57724a7707d069478df56b1",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 8.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/ad6ef9f317fb8e1ab4f1.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 8,
+ "capacity_factor": null,
+ "checkpoint_id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+ "checkpoint_revision": "6a6f4aa4197940add57724a7707d069478df56b1",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 8,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 8.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/lmsys/vicuna-7b-v1.5/e6eb0587815d37abaf03.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "lmsys/vicuna-7b-v1.5",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 11008,
+ "max_position_embeddings": 4096,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "lmsys/vicuna-7b-v1.5",
+ "checkpoint_revision": "3321f76e3f527bd14065daf69dad9344000a201d",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "float16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 32,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/lmsys/vicuna-7b-v1.5/eb93c62140353ba54657.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "lmsys/vicuna-7b-v1.5",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 11008,
+ "max_position_embeddings": 4096,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "lmsys/vicuna-7b-v1.5",
+ "checkpoint_revision": "3321f76e3f527bd14065daf69dad9344000a201d",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "float16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 32,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/princeton-nlp/Sheared-LLaMA-1.3B/8a8971a0da11451cb8a9.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "princeton-nlp/Sheared-LLaMA-1.3B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 5504,
+ "max_position_embeddings": 4096,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "princeton-nlp/Sheared-LLaMA-1.3B",
+ "checkpoint_revision": "a4b76938edbf571ea7d7d9904861cbdca08809b4",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "float16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/princeton-nlp/Sheared-LLaMA-1.3B/d0e265b870b2f9fc91c5.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "princeton-nlp/Sheared-LLaMA-1.3B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 5504,
+ "max_position_embeddings": 4096,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "princeton-nlp/Sheared-LLaMA-1.3B",
+ "checkpoint_revision": "a4b76938edbf571ea7d7d9904861cbdca08809b4",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "float16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/unsloth/Llama-3.2-1B/62b8172ee838a29e1e7f.json ADDED
@@ -0,0 +1,63 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "unsloth/Llama-3.2-1B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 64,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "unsloth/Llama-3.2-1B",
+ "checkpoint_revision": "9535bd9b1d1dea6acafbdc4813b728796aeb28da",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 16,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 32.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": true,
+ "unsloth_fixed": true,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama/unsloth/Llama-3.2-1B/f19511a53b988b95bb49.json ADDED
@@ -0,0 +1,63 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "unsloth/Llama-3.2-1B",
+ "_task": "text-generation",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "head_dim": 64,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "unsloth/Llama-3.2-1B",
+ "checkpoint_revision": "9535bd9b1d1dea6acafbdc4813b728796aeb28da",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 32,
+ "num_hidden_layers": 16,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 32.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": true,
+ "unsloth_fixed": true,
+ "use_cache": true,
+ "vocab_size": 128256
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Maverick-17B-128E-Instruct/115ac93cb9174db4e67f.json ADDED
@@ -0,0 +1,190 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct",
+ "_task": "text-generation",
+ "attention_bias": false,
+ "attention_chunk_size": 8192,
+ "attention_dropout": 0.0,
+ "attn_scale": 0.1,
+ "attn_temperature_tuning": true,
+ "floor_scale": 8192,
+ "for_llm_compressor": false,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "interleave_moe_layer_step": 2,
+ "intermediate_size": 8192,
+ "intermediate_size_mlp": 16384,
+ "layer_types": [
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 1048576,
+ "model_type": "llama4_text",
+ "moe_layers": [
+ 1,
+ 3,
+ 5,
+ 7,
+ 9,
+ 11,
+ 13,
+ 15,
+ 17,
+ 19,
+ 21,
+ 23,
+ 25,
+ 27,
+ 29,
+ 31,
+ 33,
+ 35,
+ 37,
+ 39,
+ 41,
+ 43,
+ 45,
+ 47
+ ],
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct",
+ "checkpoint_revision": "73d14711bcc77c16df3470856949c3764056b617",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 64,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn2",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 64
+ },
+ "no_rope_layers": [
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0
+ ],
+ "num_attention_heads": 40,
+ "num_experts_per_tok": 1,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 8,
+ "num_local_experts": 128,
+ "output_router_logits": false,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 500000.0,
+ "router_aux_loss_coef": 0.001,
+ "router_jitter_noise": 0.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_qk_norm": false,
+ "vocab_size": 202048
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Maverick-17B-128E-Instruct/f70dea2be77b8d1dc8ed.json ADDED
@@ -0,0 +1,190 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 2,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 1048576,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 1,
73
+ 3,
74
+ 5,
75
+ 7,
76
+ 9,
77
+ 11,
78
+ 13,
79
+ 15,
80
+ 17,
81
+ 19,
82
+ 21,
83
+ 23,
84
+ 25,
85
+ 27,
86
+ 29,
87
+ 31,
88
+ 33,
89
+ 35,
90
+ 37,
91
+ 39,
92
+ 41,
93
+ 43,
94
+ 45,
95
+ 47
96
+ ],
97
+ "neuron": {
98
+ "_serialized_key": "NxDNeuronConfig",
99
+ "batch_size": 4,
100
+ "capacity_factor": null,
101
+ "checkpoint_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct",
102
+ "checkpoint_revision": "73d14711bcc77c16df3470856949c3764056b617",
103
+ "continuous_batching": true,
104
+ "enable_bucketing": false,
105
+ "ep_degree": 1,
106
+ "fused_qkv": false,
107
+ "glu_mlp": true,
108
+ "local_ranks_size": 64,
109
+ "max_batch_size": 4,
110
+ "max_context_length": 4096,
111
+ "max_topk": 256,
112
+ "n_active_tokens": 4096,
113
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
114
+ "on_device_sampling": true,
115
+ "optimum_neuron_version": "0.4.0",
116
+ "output_logits": false,
117
+ "pp_degree": 1,
118
+ "sequence_length": 4096,
119
+ "speculation_length": 0,
120
+ "start_rank_id": 0,
121
+ "target": "trn2",
122
+ "torch_dtype": "bfloat16",
123
+ "tp_degree": 64
124
+ },
125
+ "no_rope_layers": [
126
+ 1,
127
+ 1,
128
+ 1,
129
+ 0,
130
+ 1,
131
+ 1,
132
+ 1,
133
+ 0,
134
+ 1,
135
+ 1,
136
+ 1,
137
+ 0,
138
+ 1,
139
+ 1,
140
+ 1,
141
+ 0,
142
+ 1,
143
+ 1,
144
+ 1,
145
+ 0,
146
+ 1,
147
+ 1,
148
+ 1,
149
+ 0,
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0
174
+ ],
175
+ "num_attention_heads": 40,
176
+ "num_experts_per_tok": 1,
177
+ "num_hidden_layers": 48,
178
+ "num_key_value_heads": 8,
179
+ "num_local_experts": 128,
180
+ "output_router_logits": false,
181
+ "rms_norm_eps": 1e-05,
182
+ "rope_scaling": null,
183
+ "rope_theta": 500000.0,
184
+ "router_aux_loss_coef": 0.001,
185
+ "router_jitter_noise": 0.0,
186
+ "tie_word_embeddings": false,
187
+ "use_cache": true,
188
+ "use_qk_norm": false,
189
+ "vocab_size": 202048
190
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/07656ae2a159358e76ff.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 4,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": true,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 32,
133
+ "max_batch_size": 4,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn1",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 32
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/0b9d19926bec30ac4419.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 1,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": false,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 32,
133
+ "max_batch_size": 1,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn1",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 32
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/236b23417ad1c79fbb5f.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 8,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": true,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 32,
133
+ "max_batch_size": 8,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn2",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 32
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/2a96ee4639be3796f16b.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 16,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": true,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 32,
133
+ "max_batch_size": 16,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn2",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 32
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/621024dbf42a03b7babc.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 4,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": true,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 32,
133
+ "max_batch_size": 4,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn2",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 32
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/923fcd4cf259579b5e4a.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 1,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": false,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 32,
133
+ "max_batch_size": 1,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn2",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 32
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/a90ff1e995579ec8deee.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 4,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": true,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 16,
133
+ "max_batch_size": 4,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn1",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 16
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/c3450e1affaca20e05e3.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 1,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": false,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 16,
133
+ "max_batch_size": 1,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn1",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 16
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/d33cefe3ad2c77e0544b.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 32,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": true,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 32,
133
+ "max_batch_size": 32,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn2",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 32
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/f71e619c760aaf9e2888.json ADDED
@@ -0,0 +1,220 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
4
+ "_task": "text-generation",
5
+ "attention_bias": false,
6
+ "attention_chunk_size": 8192,
7
+ "attention_dropout": 0.0,
8
+ "attn_scale": 0.1,
9
+ "attn_temperature_tuning": true,
10
+ "floor_scale": 8192,
11
+ "for_llm_compressor": false,
12
+ "head_dim": 128,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 5120,
15
+ "initializer_range": 0.02,
16
+ "interleave_moe_layer_step": 1,
17
+ "intermediate_size": 8192,
18
+ "intermediate_size_mlp": 16384,
19
+ "layer_types": [
20
+ "chunked_attention",
21
+ "chunked_attention",
22
+ "chunked_attention",
23
+ "full_attention",
24
+ "chunked_attention",
25
+ "chunked_attention",
26
+ "chunked_attention",
27
+ "full_attention",
28
+ "chunked_attention",
29
+ "chunked_attention",
30
+ "chunked_attention",
31
+ "full_attention",
32
+ "chunked_attention",
33
+ "chunked_attention",
34
+ "chunked_attention",
35
+ "full_attention",
36
+ "chunked_attention",
37
+ "chunked_attention",
38
+ "chunked_attention",
39
+ "full_attention",
40
+ "chunked_attention",
41
+ "chunked_attention",
42
+ "chunked_attention",
43
+ "full_attention",
44
+ "chunked_attention",
45
+ "chunked_attention",
46
+ "chunked_attention",
47
+ "full_attention",
48
+ "chunked_attention",
49
+ "chunked_attention",
50
+ "chunked_attention",
51
+ "full_attention",
52
+ "chunked_attention",
53
+ "chunked_attention",
54
+ "chunked_attention",
55
+ "full_attention",
56
+ "chunked_attention",
57
+ "chunked_attention",
58
+ "chunked_attention",
59
+ "full_attention",
60
+ "chunked_attention",
61
+ "chunked_attention",
62
+ "chunked_attention",
63
+ "full_attention",
64
+ "chunked_attention",
65
+ "chunked_attention",
66
+ "chunked_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 10485760,
70
+ "model_type": "llama4_text",
71
+ "moe_layers": [
72
+ 0,
73
+ 1,
74
+ 2,
75
+ 3,
76
+ 4,
77
+ 5,
78
+ 6,
79
+ 7,
80
+ 8,
81
+ 9,
82
+ 10,
83
+ 11,
84
+ 12,
85
+ 13,
86
+ 14,
87
+ 15,
88
+ 16,
89
+ 17,
90
+ 18,
91
+ 19,
92
+ 20,
93
+ 21,
94
+ 22,
95
+ 23,
96
+ 24,
97
+ 25,
98
+ 26,
99
+ 27,
100
+ 28,
101
+ 29,
102
+ 30,
103
+ 31,
104
+ 32,
105
+ 33,
106
+ 34,
107
+ 35,
108
+ 36,
109
+ 37,
110
+ 38,
111
+ 39,
112
+ 40,
113
+ 41,
114
+ 42,
115
+ 43,
116
+ 44,
117
+ 45,
118
+ 46,
119
+ 47
120
+ ],
121
+ "neuron": {
122
+ "_serialized_key": "NxDNeuronConfig",
123
+ "batch_size": 8,
124
+ "capacity_factor": null,
125
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
126
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
127
+ "continuous_batching": true,
128
+ "enable_bucketing": false,
129
+ "ep_degree": 1,
130
+ "fused_qkv": false,
131
+ "glu_mlp": true,
132
+ "local_ranks_size": 32,
133
+ "max_batch_size": 8,
134
+ "max_context_length": 4096,
135
+ "max_topk": 256,
136
+ "n_active_tokens": 4096,
137
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
138
+ "on_device_sampling": true,
139
+ "optimum_neuron_version": "0.4.0",
140
+ "output_logits": false,
141
+ "pp_degree": 1,
142
+ "sequence_length": 4096,
143
+ "speculation_length": 0,
144
+ "start_rank_id": 0,
145
+ "target": "trn1",
146
+ "torch_dtype": "bfloat16",
147
+ "tp_degree": 32
148
+ },
149
+ "no_rope_layers": [
150
+ 1,
151
+ 1,
152
+ 1,
153
+ 0,
154
+ 1,
155
+ 1,
156
+ 1,
157
+ 0,
158
+ 1,
159
+ 1,
160
+ 1,
161
+ 0,
162
+ 1,
163
+ 1,
164
+ 1,
165
+ 0,
166
+ 1,
167
+ 1,
168
+ 1,
169
+ 0,
170
+ 1,
171
+ 1,
172
+ 1,
173
+ 0,
174
+ 1,
175
+ 1,
176
+ 1,
177
+ 0,
178
+ 1,
179
+ 1,
180
+ 1,
181
+ 0,
182
+ 1,
183
+ 1,
184
+ 1,
185
+ 0,
186
+ 1,
187
+ 1,
188
+ 1,
189
+ 0,
190
+ 1,
191
+ 1,
192
+ 1,
193
+ 0,
194
+ 1,
195
+ 1,
196
+ 1,
197
+ 0
198
+ ],
199
+ "num_attention_heads": 40,
200
+ "num_experts_per_tok": 1,
201
+ "num_hidden_layers": 48,
202
+ "num_key_value_heads": 8,
203
+ "num_local_experts": 16,
204
+ "output_router_logits": false,
205
+ "rms_norm_eps": 1e-05,
206
+ "rope_scaling": {
207
+ "factor": 16.0,
208
+ "high_freq_factor": 1.0,
209
+ "low_freq_factor": 1.0,
210
+ "original_max_position_embeddings": 8192,
211
+ "rope_type": "llama3"
212
+ },
213
+ "rope_theta": 500000.0,
214
+ "router_aux_loss_coef": 0.001,
215
+ "router_jitter_noise": 0.0,
216
+ "tie_word_embeddings": false,
217
+ "use_cache": true,
218
+ "use_qk_norm": true,
219
+ "vocab_size": 202048
220
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/phi3/microsoft/Phi-3-mini-4k-instruct/3558b5ac7259b6bcc01a.json ADDED
@@ -0,0 +1,62 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "microsoft/Phi-3-mini-4k-instruct",
4
+ "_task": "text-generation",
5
+ "architectures": [
6
+ "Phi3ForCausalLM"
7
+ ],
8
+ "attention_bias": false,
9
+ "attention_dropout": 0.0,
10
+ "auto_map": {
11
+ "AutoConfig": "configuration_phi3.Phi3Config",
12
+ "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM"
13
+ },
14
+ "embd_pdrop": 0.0,
15
+ "hidden_act": "silu",
16
+ "hidden_size": 3072,
17
+ "initializer_range": 0.02,
18
+ "intermediate_size": 8192,
19
+ "max_position_embeddings": 4096,
20
+ "model_type": "phi3",
21
+ "neuron": {
22
+ "_serialized_key": "NxDNeuronConfig",
23
+ "batch_size": 1,
24
+ "capacity_factor": null,
25
+ "checkpoint_id": "microsoft/Phi-3-mini-4k-instruct",
26
+ "checkpoint_revision": "0a67737cc96d2554230f90338b163bc6380a2a85",
27
+ "continuous_batching": false,
28
+ "enable_bucketing": false,
29
+ "ep_degree": 1,
30
+ "fused_qkv": true,
31
+ "glu_mlp": true,
32
+ "local_ranks_size": 2,
33
+ "max_batch_size": 1,
34
+ "max_context_length": 4096,
35
+ "max_topk": 256,
36
+ "n_active_tokens": 4096,
37
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
38
+ "on_device_sampling": true,
39
+ "optimum_neuron_version": "0.4.0",
40
+ "output_logits": false,
41
+ "pp_degree": 1,
42
+ "sequence_length": 4096,
43
+ "speculation_length": 0,
44
+ "start_rank_id": 0,
45
+ "target": "trn1",
46
+ "torch_dtype": "bfloat16",
47
+ "tp_degree": 2
48
+ },
49
+ "num_attention_heads": 32,
50
+ "num_hidden_layers": 32,
51
+ "num_key_value_heads": 32,
52
+ "original_max_position_embeddings": 4096,
53
+ "partial_rotary_factor": 1.0,
54
+ "resid_pdrop": 0.0,
55
+ "rms_norm_eps": 1e-05,
56
+ "rope_scaling": null,
57
+ "rope_theta": 10000.0,
58
+ "sliding_window": 2047,
59
+ "tie_word_embeddings": false,
60
+ "use_cache": true,
61
+ "vocab_size": 32064
62
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/phi3/microsoft/phi-4/38f87915d107c55b7651.json ADDED
@@ -0,0 +1,58 @@
1
+ {
2
+ "_entry_class": "SingleModelCacheEntry",
3
+ "_model_id": "microsoft/phi-4",
4
+ "_task": "text-generation",
5
+ "architectures": [
6
+ "Phi3ForCausalLM"
7
+ ],
8
+ "attention_bias": false,
9
+ "attention_dropout": 0.0,
10
+ "embd_pdrop": 0.0,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 5120,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 17920,
15
+ "max_position_embeddings": 16384,
16
+ "model_type": "phi3",
17
+ "neuron": {
18
+ "_serialized_key": "NxDNeuronConfig",
19
+ "batch_size": 16,
20
+ "capacity_factor": null,
21
+ "checkpoint_id": "microsoft/phi-4",
22
+ "checkpoint_revision": "187ef0342fff0eb3333be9f00389385e95ef0b61",
23
+ "continuous_batching": true,
24
+ "enable_bucketing": false,
25
+ "ep_degree": 1,
26
+ "fused_qkv": true,
27
+ "glu_mlp": true,
28
+ "local_ranks_size": 10,
29
+ "max_batch_size": 16,
30
+ "max_context_length": 4096,
31
+ "max_topk": 256,
32
+ "n_active_tokens": 4096,
33
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
34
+ "on_device_sampling": true,
35
+ "optimum_neuron_version": "0.4.0",
36
+ "output_logits": false,
37
+ "pp_degree": 1,
38
+ "sequence_length": 4096,
39
+ "speculation_length": 0,
40
+ "start_rank_id": 0,
41
+ "target": "trn1",
42
+ "torch_dtype": "bfloat16",
43
+ "tp_degree": 10
44
+ },
45
+ "num_attention_heads": 40,
46
+ "num_hidden_layers": 40,
47
+ "num_key_value_heads": 10,
48
+ "original_max_position_embeddings": 16384,
49
+ "partial_rotary_factor": 1.0,
50
+ "resid_pdrop": 0.0,
51
+ "rms_norm_eps": 1e-05,
52
+ "rope_scaling": null,
53
+ "rope_theta": 250000,
54
+ "sliding_window": null,
55
+ "tie_word_embeddings": false,
56
+ "use_cache": true,
57
+ "vocab_size": 100352
58
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/phi3/microsoft/phi-4/473a4f2462bcd8b3f136.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "microsoft/phi-4",
+ "_task": "text-generation",
+ "architectures": [
+ "Phi3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "embd_pdrop": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 17920,
+ "max_position_embeddings": 16384,
+ "model_type": "phi3",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "microsoft/phi-4",
+ "checkpoint_revision": "187ef0342fff0eb3333be9f00389385e95ef0b61",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": true,
+ "glu_mlp": true,
+ "local_ranks_size": 10,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 10
+ },
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 10,
+ "original_max_position_embeddings": 16384,
+ "partial_rotary_factor": 1.0,
+ "resid_pdrop": 0.0,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 250000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 100352
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-0.5B/300b37dace1ce2c0b783.json ADDED
@@ -0,0 +1,82 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-0.5B",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-0.5B",
+ "checkpoint_revision": "060db6499f32faf8b98477b0a26969ef7d8b9987",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-1.5B/8d982941157412579546.json ADDED
@@ -0,0 +1,86 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-1.5B",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 131072,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-1.5B",
+ "checkpoint_revision": "8faed761d45a263340a0528343f099c05c9a4323",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-1.5B/dea81904d370c8b20332.json ADDED
@@ -0,0 +1,86 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-1.5B",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 131072,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-1.5B",
+ "checkpoint_revision": "8faed761d45a263340a0528343f099c05c9a4323",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-14B/877be4240e4a459b2a14.json ADDED
@@ -0,0 +1,105 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-14B",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 131072,
+ "max_window_layers": 48,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-14B",
+ "checkpoint_revision": "97e1e76335b7017d8f67c08a19d103c0504298c9",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 40,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-14B/c05ba11ec3a01458a2e6.json ADDED
@@ -0,0 +1,105 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-14B",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 131072,
+ "max_window_layers": 48,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 16,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-14B",
+ "checkpoint_revision": "97e1e76335b7017d8f67c08a19d103c0504298c9",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 16,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 40,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-32B-Instruct/2e5ba8f801dbc7a16c3c.json ADDED
@@ -0,0 +1,121 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-32B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 27648,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 70,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 8,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-32B-Instruct",
+ "checkpoint_revision": "5ede1c97bbab6ce5cda5812749b4c0bdf79b18dd",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 8,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 40,
+ "num_hidden_layers": 64,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-32B-Instruct/7182911a8d43e7187430.json ADDED
@@ -0,0 +1,121 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-32B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 27648,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 70,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-32B-Instruct",
+ "checkpoint_revision": "5ede1c97bbab6ce5cda5812749b4c0bdf79b18dd",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 40,
+ "num_hidden_layers": 64,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-72B-Instruct/f3b6f76004dc3d143c7e.json ADDED
@@ -0,0 +1,137 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-72B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 8192,
+ "initializer_range": 0.02,
+ "intermediate_size": 29568,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 70,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-72B-Instruct",
+ "checkpoint_revision": "495f39366efef23836d0cfae4fbe635880d2be31",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 24,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 24
+ },
+ "num_attention_heads": 64,
+ "num_hidden_layers": 80,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-7B-Instruct/2ffd57bb17f3a35919c6.json ADDED
@@ -0,0 +1,85 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-7B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 3584,
+ "initializer_range": 0.02,
+ "intermediate_size": 18944,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-7B-Instruct",
+ "checkpoint_revision": "a09a35458c702b33eeacc393d103063234e8bc28",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 28,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 4,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-7B-Instruct/85aeb3e82bb9189fa256.json ADDED
@@ -0,0 +1,85 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-7B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 3584,
+ "initializer_range": 0.02,
+ "intermediate_size": 18944,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 8,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-7B-Instruct",
+ "checkpoint_revision": "a09a35458c702b33eeacc393d103063234e8bc28",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 8,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 28,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 4,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-7B-Instruct/b256ce0e46280fedadb4.json ADDED
@@ -0,0 +1,85 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-7B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 3584,
+ "initializer_range": 0.02,
+ "intermediate_size": 18944,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 1,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-7B-Instruct",
+ "checkpoint_revision": "a09a35458c702b33eeacc393d103063234e8bc28",
+ "continuous_batching": false,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 2,
+ "max_batch_size": 1,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 2
+ },
+ "num_attention_heads": 28,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 4,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0/qwen2/Qwen/Qwen2.5-7B-Instruct/bbe60ad043d0675f6bd9.json ADDED
@@ -0,0 +1,85 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen2.5-7B-Instruct",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 3584,
+ "initializer_range": 0.02,
+ "intermediate_size": 18944,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 32,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen2.5-7B-Instruct",
+ "checkpoint_revision": "a09a35458c702b33eeacc393d103063234e8bc28",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 8,
+ "max_batch_size": 32,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.21.18209.0+043b1bf7",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.4.0",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": "trn1",
+ "torch_dtype": "bfloat16",
+ "tp_degree": 8
+ },
+ "num_attention_heads": 28,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 4,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
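The four Qwen/Qwen2.5-7B-Instruct entries above differ only in their neuron block (batch size, tensor-parallel degree, and sampling mode). A minimal sketch for listing those variants from a synced copy of the registry, assuming only the directory layout visible in the file names (paths are illustrative; standard library only):

import json
from pathlib import Path

# Root of a locally synced cache; layout follows the file names in this commit.
registry = Path("neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.0")

for entry_file in sorted(registry.glob("qwen2/Qwen/Qwen2.5-7B-Instruct/*.json")):
    cfg = json.loads(entry_file.read_text())["neuron"]
    # Summarize the fields that distinguish one cached compilation from another.
    print(
        entry_file.name,
        f"batch_size={cfg['batch_size']}",
        f"tp_degree={cfg['tp_degree']}",
        f"sequence_length={cfg['sequence_length']}",
        f"on_device_sampling={cfg['on_device_sampling']}",
    )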