{
"architectures": [
"CsmForConditionalGeneration"
],
"attention_bias": false,
"attention_dropout": 0.0,
"audio_eos_token_id": 128003,
"audio_token_id": 128002,
"bos_token_id": 128000,
"codebook_eos_token_id": 0,
"codebook_pad_token_id": 2050,
"codec_config": {
"_name_or_path": "kyutai/mimi",
"architectures": [
"MimiModel"
],
"attention_bias": false,
"attention_dropout": 0.0,
"audio_channels": 1,
"codebook_dim": 256,
"codebook_size": 2048,
"compress": 2,
"dilation_growth_rate": 2,
"frame_rate": 12.5,
"head_dim": 64,
"hidden_act": "gelu",
"hidden_size": 512,
"initializer_range": 0.02,
"intermediate_size": 2048,
"kernel_size": 7,
"last_kernel_size": 3,
"layer_scale_initial_scale": 0.01,
"max_position_embeddings": 8000,
"model_type": "mimi",
"norm_eps": 1e-05,
"normalize": false,
"num_attention_heads": 8,
"num_filters": 64,
"num_hidden_layers": 8,
"num_key_value_heads": 8,
"num_quantizers": 32,
"num_residual_layers": 1,
"num_semantic_quantizers": 1,
"pad_mode": "constant",
"residual_kernel_size": 3,
"rope_theta": 10000.0,
"sampling_rate": 24000,
"sliding_window": 250,
"torch_dtype": "float32",
"trim_right_ratio": 1.0,
"upsample_groups": 512,
"upsampling_ratios": [
8,
6,
5,
4
],
"use_cache": false,
"use_causal_conv": true,
"use_conv_shortcut": false,
"vector_quantization_hidden_dimension": 256
},
"depth_decoder_config": {
"attention_bias": false,
"attention_dropout": 0.0,
"backbone_hidden_size": 2048,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 8192,
"max_position_embeddings": 33,
"mlp_bias": false,
"model_type": "csm_depth_decoder_model",
"num_attention_heads": 8,
"num_codebooks": 32,
"num_hidden_layers": 4,
"num_key_value_heads": 2,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 32.0,
"high_freq_factor": 0.0078125,
"low_freq_factor": 0.001953125,
"original_max_position_embeddings": 16,
"rope_type": "llama3"
},
"rope_theta": 500000,
"use_cache": true,
"vocab_size": 2051
},
"head_dim": 64,
"hidden_act": "silu",
"hidden_size": 2048,
"initializer_range": 0.02,
"intermediate_size": 8192,
"max_position_embeddings": 2048,
"mlp_bias": false,
"model_type": "csm",
"num_attention_heads": 32,
"num_codebooks": 32,
"num_hidden_layers": 16,
"num_key_value_heads": 8,
"pad_token_id": 128002,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 32.0,
"high_freq_factor": 0.5,
"low_freq_factor": 0.125,
"original_max_position_embeddings": 1024,
"rope_type": "llama3"
},
"rope_theta": 500000,
"text_vocab_size": 128256,
"tie_codebooks_embeddings": true,
"tie_word_embeddings": false,
"torch_dtype": "float32",
"transformers_version": "4.52.0.dev0",
"use_cache": true,
"vocab_size": 2051,
"audio_num_codebooks": 32,
"audio_vocab_size": 2051,
"backbone_flavor": "llama-1B",
"decoder_flavor": "llama-100M",
"transformers_weights": "transformers.safetensors.index.json"
}
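
A minimal usage sketch (not part of config.json itself): loading this configuration with Hugging Face transformers and reading the nested codec and depth-decoder sub-configs. The local directory name is hypothetical, it assumes a transformers build recent enough to register the "csm" model type (this file was written by 4.52.0.dev0), and it assumes the sub-configs are exposed as attributes, as composite transformers configs typically are.

from transformers import AutoConfig

# Load the config above, assuming it is saved as config.json in a
# hypothetical local directory named "csm-1b".
config = AutoConfig.from_pretrained("csm-1b")

print(config.model_type)                        # "csm": 16-layer backbone, hidden_size 2048
print(config.depth_decoder_config.hidden_size)  # 1024: small decoder over the 32 codebooks
print(config.codec_config.model_type)           # "mimi": 24 kHz codec at 12.5 frames/s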