{
  "_name_or_path": "/data_storage/lbw/MMaDA/mmada-training-stage4-llada-instruct/checkpoint-265000/unwrapped_model",
  "activation_type": "silu",
  "alibi": false,
  "alibi_bias_max": 8.0,
  "architectures": [
    "LLaDAModelLM"
  ],
  "attention_dropout": 0.0,
  "attention_layer_norm": false,
  "attention_layer_norm_with_affine": true,
  "auto_map": {
    "AutoConfig": "configuration_llada.LLaDAConfig",
    "AutoModel": "modeling_llada.LLaDAModelLM",
    "AutoModelForCausalLM": "modeling_llada.LLaDAModelLM"
  },
  "bias_for_layer_norm": false,
  "block_group_size": 1,
  "block_type": "llama",
  "codebook_size": 8192,
  "d_model": 4096,
  "embedding_dropout": 0.0,
  "embedding_size": 134656,
  "eos_token_id": 126081,
  "flash_attention": false,
  "include_bias": false,
  "include_qkv_bias": false,
  "init_cutoff_factor": null,
  "init_device": "meta",
  "init_fn": "mitchell",
  "init_std": 0.02,
  "input_emb_norm": false,
  "layer_norm_type": "rms",
  "layer_norm_with_affine": true,
  "llm_vocab_size": 126464,
  "mask_token_id": 126336,
  "max_sequence_length": 4096,
  "mlp_hidden_size": 12288,
  "mlp_ratio": 4,
  "model_type": "llada",
  "multi_query_attention": null,
  "n_heads": 32,
  "n_kv_heads": 32,
  "n_layers": 32,
  "new_vocab_size": 134656,
  "num_new_special_tokens": 0,
  "num_vq_tokens": 256,
  "pad_token_id": 126081,
  "precision": "amp_bf16",
  "pretrained_model_path": "/data_storage/shared/pretrained_models/LLaDA-8B-Instruct",
  "residual_dropout": 0.0,
  "rms_norm_eps": 1e-05,
  "rope": true,
  "rope_full_precision": true,
  "rope_theta": 500000.0,
  "scale_logits": false,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.46.0",
  "use_cache": false,
  "vocab_size": 134656,
  "w_clip_vit": false,
  "weight_tying": false
}