{
  "_attn_implementation_autoset": false,
  "_name_or_path": "meta-llama/Llama-3.1-8B",
  "add_cross_attention": false,
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "attribute_map": {},
  "bad_words_ids": null,
  "begin_suppress_tokens": null,
  "bos_token_id": 128000,
  "chunk_size_feed_forward": 0,
  "cross_attention_hidden_size": null,
  "decoder_start_token_id": null,
  "diversity_penalty": 0.0,
  "do_sample": false,
  "early_stopping": false,
  "encoder_no_repeat_ngram_size": 0,
  "eos_token_id": 128001,
  "exponential_decay_length_penalty": null,
  "finetuning_task": null,
  "forced_bos_token_id": null,
  "forced_eos_token_id": null,
  "fused_spec_config": null,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1"
  },
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "is_decoder": false,
  "is_encoder_decoder": false,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1
  },
  "length_penalty": 1.0,
  "max_length": 20,
  "max_position_embeddings": 131072,
  "min_length": 0,
  "mlp_bias": false,
  "model_type": "llama",
  "neuron_config": {
    "activation_quantization_type": null,
    "allow_input_truncation": false,
    "async_mode": false,
    "attn_cls": "NeuronLlamaAttention",
    "attn_kernel_enabled": false,
    "batch_size": 32,
    "bucket_n_active_tokens": false,
    "buckets": [
      16000
    ],
    "cc_pipeline_tiling_factor": 2,
    "context_encoding_buckets": null,
    "cp_max_num_seqs": 0,
    "cp_num_active_blocks": 0,
    "ctx_batch_size": 1,
    "draft_model_modules_to_not_convert": null,
    "enable_bucketing": true,
    "enable_eagle_draft_input_norm": false,
    "enable_eagle_speculation": false,
    "enable_fused_speculation": false,
    "enable_token_tree": false,
    "ep_degree": 1,
    "flash_decoding_enabled": false,
    "fused_qkv": false,
    "is_block_kv_layout": false,
    "is_chunked_prefill": false,
    "is_continuous_batching": true,
    "is_eagle_draft": false,
    "is_medusa": false,
    "is_prefill_stage": null,
    "is_prefix_caching": false,
    "kv_cache_batch_size": 32,
    "kv_cache_padding_size": 0,
    "kv_cache_quant": false,
    "kv_cache_tiling": false,
    "local_ranks_size": 16,
    "logical_nc_config": 1,
    "lora_config": null,
    "max_batch_size": 32,
    "max_context_length": 16000,
    "max_length": 16000,
    "max_new_tokens": null,
    "medusa_speculation_length": 0,
    "medusa_tree": null,
    "mlp_kernel_enabled": false,
    "mlp_kernel_fuse_residual_add": false,
    "modules_to_not_convert": null,
    "n_active_tokens": 16000,
    "n_positions": 16000,
    "num_medusa_heads": 0,
    "on_cpu": false,
    "on_device_sampling_config": {
      "deterministic": false,
      "do_sample": false,
      "dynamic": true,
      "global_topk": 256,
      "on_device_sampling_config": true,
      "temperature": 1.0,
      "top_k": 1,
      "top_p": 1.0
    },
    "output_logits": false,
    "overrides_torch_dtype": true,
    "pa_block_size": 16000,
    "pa_num_blocks": 32,
    "padding_side": "right",
    "pp_degree": 1,
    "qk_layernorm": false,
    "qkv_kernel_enabled": false,
    "qkv_kernel_nbsd_layout": false,
    "quantization_dtype": "int8",
    "quantization_type": "per_tensor_symmetric",
    "quantize_clamp_bound": Infinity,
    "quantized": false,
    "quantized_checkpoints_path": null,
    "quantized_mlp_kernel_enabled": false,
    "rmsnorm_quantize_kernel_enabled": false,
    "rpl_reduce_dtype": "bfloat16",
    "save_sharded_checkpoint": true,
    "seq_len": 16000,
    "sequence_parallel_enabled": false,
    "skip_sharding": false,
    "skip_warmup": true,
    "spec_batch_size": 32,
    "speculation_length": 0,
    "start_rank_id": 0,
    "target": null,
    "tkg_batch_size": 32,
    "token_generation_buckets": null,
    "token_tree_config": null,
    "torch_dtype": "bfloat16",
    "tp_degree": 16,
    "vocab_parallel": false,
    "weights_to_skip_layout_optimization": [],
    "world_size": 16
  },
  "no_repeat_ngram_size": 0,
  "num_attention_heads": 32,
  "num_beam_groups": 1,
  "num_beams": 1,
  "num_cores_per_group": 1,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "num_return_sequences": 1,
  "output_attentions": false,
  "output_hidden_states": false,
  "output_scores": false,
  "pad_token_id": null,
  "prefix": null,
  "pretraining_tp": 1,
  "problem_type": null,
  "pruned_heads": {},
  "remove_invalid_values": false,
  "repetition_penalty": 1.0,
  "return_dict": true,
  "return_dict_in_generate": false,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 8.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_type": "llama3"
  },
  "rope_theta": 500000.0,
  "sep_token_id": null,
  "suppress_tokens": null,
  "task_specific_params": null,
  "temperature": 1.0,
  "tf_legacy_loss": false,
  "tie_encoder_decoder": false,
  "tie_word_embeddings": false,
  "tokenizer_class": null,
  "top_k": 50,
  "top_p": 1.0,
  "torchscript": false,
  "transformers_version": "4.51.3",
  "typical_p": 1.0,
  "use_bfloat16": false,
  "use_cache": true,
  "vocab_size": 128256
}
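A minimal sketch of sanity-checking this dump, assuming it is saved locally as config.json (the path is hypothetical, not taken from the source). It parses the file with Python's standard json module (which accepts the non-standard Infinity literal in "quantize_clamp_bound" by default), verifies the model geometry, and estimates the bf16 KV-cache footprint at the compiled 16000-token bucket, given that "kv_cache_quant" is false and "torch_dtype" is "bfloat16":

import json

# Hypothetical local path for the dump shown above.
with open("config.json") as f:
    cfg = json.load(f)

neuron = cfg["neuron_config"]

# Geometry checks implied by the config itself.
assert cfg["head_dim"] == cfg["hidden_size"] // cfg["num_attention_heads"]  # 4096 / 32 = 128
assert neuron["world_size"] == neuron["tp_degree"] * neuron["pp_degree"]    # 16 = 16 * 1

# Grouped-query attention: 32 query heads share 8 KV heads.
gqa_ratio = cfg["num_attention_heads"] // cfg["num_key_value_heads"]         # 4

# bf16 KV cache: 2 tensors (K and V) * kv_heads * head_dim * 2 bytes,
# per token, per layer.
bytes_per_token = 2 * cfg["num_key_value_heads"] * cfg["head_dim"] * 2 * cfg["num_hidden_layers"]
per_seq_gib = bytes_per_token * neuron["seq_len"] / 2**30
total_gib = per_seq_gib * neuron["batch_size"]

print(f"GQA ratio: {gqa_ratio}")                        # 4
print(f"KV cache per sequence: {per_seq_gib:.2f} GiB")  # ~1.95 GiB at seq_len=16000
print(f"KV cache at batch 32: {total_gib:.1f} GiB")     # ~62.5 GiB, sharded across tp_degree=16

The quantization fields ("quantization_dtype": "int8", "quantization_type": "per_tensor_symmetric") describe how weights would be quantized if enabled; since "quantized" is false, they are inert defaults and the footprint arithmetic above stays in bf16.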
|
|