diff --git a/meta-llama-Llama-2-7b-hf-mxint8/formatted_tensors.json b/meta-llama-Llama-2-7b-hf-mxint8/formatted_tensors.json new file mode 100644 index 0000000000000000000000000000000000000000..19b1cb75da41e42c04844d04a4a8ff4b08bfd4cb --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/formatted_tensors.json @@ -0,0 +1,722 @@ +{ + "meta-llama_Llama-2-7b-hf.model.embed_tokens.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32000, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.embed_tokens.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": 
"saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32, + 1, + 128 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -2, + "shape": [ + 32, + 128, + 1 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked": { + 
"tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32, + 1, + 1 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -2, + "shape": [ + 32, + 1, + 128 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 32, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add1.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add1.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy" + }, + 
"meta-llama_Llama-2-7b-hf.model.layers.0.add1.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 11008, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 11008, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy" + }, + 
"meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 11008 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 11008 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add2.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add2.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add2.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.norm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.norm.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.norm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.norm.output.npy" + }, + "meta-llama_Llama-2-7b-hf.lm_head.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": 
"saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.input.npy" + }, + "meta-llama_Llama-2-7b-hf.lm_head.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32000, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.lm_head.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 32000 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.output.npy" + }, + "input_ids": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.int64", + "shape": [ + 1, + 1 + ] + }, + "int": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/input_ids.npy" + } +} \ No newline at end of file diff --git a/meta-llama-Llama-2-7b-hf-mxint8/input_ids.npy b/meta-llama-Llama-2-7b-hf-mxint8/input_ids.npy new file mode 100644 index 0000000000000000000000000000000000000000..6593c1ffa3b9085aace5bfe3b258f2915b04c94d --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/input_ids.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eb47bb5e8322411424c1d3e4312e472aeb5cf21135a0650a14111ad8ac8b287 +size 136 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..df9d29579609ee8b5f65f7f5a7e674b72d437b3f --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fef09aca4d64e1cbf4dc77219595c05a79b449c4764469f46947c12e9c09cfc +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..ae84108825675dc6df4321bcf40cadd114fe62f3 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc369fa525501149827b8de254156fea89bd89fa2db15adc769d6ebe77f618af +size 512128 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..697208ef82b392a78a4b0dc8b7bf25eaba641ea9 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.lm_head.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:562ba61b2aff15fbc3f097a75441dcdfa2c81c4e549df7cf8c132843e1e09dbd +size 2097152128 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..74e23fac51f5dba00e33129abf4b2c7b6b3e0116 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy 
b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..9510410f0bfbb128f94cd489c8a8c4855c960024 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1388975e155edbc05fe1c28d0912655aa6265106f5192ef0810a3498c22e979 +size 2097152128 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..74e23fac51f5dba00e33129abf4b2c7b6b3e0116 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..07cac02aed478a1adc29a26c9fcff1ddcb68d4f4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..fb3a6c07e2a8091746a98bc0708730a1e439d782 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..fb3a6c07e2a8091746a98bc0708730a1e439d782 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..74eb2cd25d90738e343915a634eb0d511552073b --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f30c21843e6f29d3c5dd1df4d9c4b86946ec81e6a98869744b888f0b983eba83 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..8e550325dc79bb16492af2f141f5626163825a4b --- /dev/null 
+++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e188b25a4891476f2f2d61ddebe79eddb6c725eb56bc5575094b9098fa61eda +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..74e23fac51f5dba00e33129abf4b2c7b6b3e0116 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..abed340dc60d2f6b9523521d3e1b8c0fdcecf6df --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0eadb0ba19cff507cc1b50fa0adb3057a75be05df95f104c57f5e2824d8a7915 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..198358090f01248c64cb80b8dd05269daacf9203 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a45cb4954612c5ba9b9eac55b717a60fa0d2608e8b5464db0ca98c45e51d1a04 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..13c22b8bb2362521730cf318e39bce6d707bb925 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfd9c0f8ff57ad5ed26a9b7212e11c436848e6f6e803ae4210ff1b358d0afa72 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..ecca79627cc6252155ec4443834a335e8b262597 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eab83152546ebdbd5902900867f768fa2bcd23eba17698c54a943d75d3c5a35c +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..74eb2cd25d90738e343915a634eb0d511552073b --- /dev/null +++ 
b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f30c21843e6f29d3c5dd1df4d9c4b86946ec81e6a98869744b888f0b983eba83 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..8eae46c783a893e41f8ec035bb193ca5f6072a24 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593afa2fc524c28e335c4db7df000ed9e94ea9940cb87a146a8dfb71cbf94fc0 +size 721420416 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..9dcb6949bde20d3bcc758f378b5fd56fdbbac790 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d04504eeff2f40c9e65b5cf9337f1548d24c1c15c34767038209a8dfda597c8 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..198358090f01248c64cb80b8dd05269daacf9203 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a45cb4954612c5ba9b9eac55b717a60fa0d2608e8b5464db0ca98c45e51d1a04 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..37235b93ec2dd2536957717e71e2b95aa6f1f36a --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1520c00f2cee83a6a96805a1fdc51fb9483db5516a1b1b9a673f46329aeba7d0 +size 721420416 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..13c22b8bb2362521730cf318e39bce6d707bb925 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfd9c0f8ff57ad5ed26a9b7212e11c436848e6f6e803ae4210ff1b358d0afa72 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..0cf2db6c5103b6c931825ea0ec6540cf711c121c --- /dev/null +++ 
b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50dd427a7864e6ebe7531b43e1df1c221877b50f025296e3b36b1b3be2a5dc13 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..ce5a77fbac26ade473ec64d56ad9055a93effaf9 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49ce5456ec01ed652d4e287e6df237690d70ad60abe005528846d39ef6cd9253 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..9dcb6949bde20d3bcc758f378b5fd56fdbbac790 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d04504eeff2f40c9e65b5cf9337f1548d24c1c15c34767038209a8dfda597c8 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..0cf2db6c5103b6c931825ea0ec6540cf711c121c --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50dd427a7864e6ebe7531b43e1df1c221877b50f025296e3b36b1b3be2a5dc13 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..d0e30da95a96c537bccc1074050fd77d5e1fef7d --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:720727aaef967c1520e631818799f40f87992c588f21b10c0c76b1f94add94e1 +size 721420416 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..fb3a6c07e2a8091746a98bc0708730a1e439d782 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..6dc41dd569ab20d7e99234887a53732e0b3c7e09 --- /dev/null +++ 
b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d84d4cf16d800314099f59c72c3dbc8bd50545e12ff918d73a9103fe18502263 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..90c522f6f2c6ef35d47a26cea5b18eb38c5c08eb --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f91f84a998faf60febf5cc370068cc25087dd28529454af6895effc80f749cdf +size 640 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..6ecd0ceb544917fd9833dd5b5976ef474e1b94e8 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc7b5e0415522575497fcd7816ce22b9cc01ce828196f9d8abe3c2022d106b1f +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..dd653be8eac24546e22f7f27ba1fd40e45f97365 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d088e565b13f1e598b9812bd92af28d4126cf5d9f248e49c4ceffc55233256a +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..4948fba62cb1e19933c3c40d0e70398da40e66c4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d494a98dd8f88a2c74650d0ccebe6ded263d5d1856718954fd3894ce54b922 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..a3f0e1e54058268c033dc3b85192bd0e603d5edf --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fdf4135ec04c03659f548cd317751f23fabadd40445b21c783b3ac58840f428 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy new file mode 100644 index 
0000000000000000000000000000000000000000..e89002f8860741715e4184e5d7dbe14c9f148bec --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3465164bdf0a65009ee0be368f050dc397bed2bc9cb0fab3047c92d21fe58c8b +size 640 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy new file mode 100644 index 0000000000000000000000000000000000000000..842998568207e6cf03441a968d246cfd250a14a6 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a17f6a8fa699ce8479b88b4f226ceb5cb33f127776aab3e551724ecd7e4a133 +size 640 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy new file mode 100644 index 0000000000000000000000000000000000000000..bae379e173c543699aee73afd50597bcb3faf549 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17cc502b3f0d8fa3f52d7c3292055a318f473fc2116c299449a3d8175e08b212 +size 640 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..3c9a8f64fadacb16a331843a2b42fff90658a3e5 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad9628fee7a934a08186ab804af49e013372cf3c3a22f83ce5b11d578409d240 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..adcd9c675c5654a88eeec33b8618e244c6c91f23 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efc76cb674ffa88850c84c791dd858b2deb036319e63d27cf3a5249e604eb01d +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..4193e54c801f32302aff3702b69aa03a0bf7b3d7 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdaded8169a538789d48e58777e61669d762b0f2c5d583def7210a3028e8c1a8 +size 268435584 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy 
b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..2958e4af0bbba511e7d7667cf61eb610530922e2 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2fbbefa0d551308808405c34178156e5cbf78fb7a230a5f3ca96a21693dbfbb +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..07cac02aed478a1adc29a26c9fcff1ddcb68d4f4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..fe7f0de629046fea8cd61ca105258ba1f22b6ea4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10e92abe6811d82d6e08a3821ace895ec8796101cb35d4c0c2342428113995bc +size 268435584 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..07cac02aed478a1adc29a26c9fcff1ddcb68d4f4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..3c9a8f64fadacb16a331843a2b42fff90658a3e5 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad9628fee7a934a08186ab804af49e013372cf3c3a22f83ce5b11d578409d240 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..cade7fa97e0b7edec152f60e34c4f835c7e1c35a --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94348e987f572a574ef468f678316072543a214538e166381efc615caf2ccb94 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy 
b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..1c5cedfb798ca594bfcb3115a49c820c77306ab4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:caf5788f94b619f9edf12d274f0330f0a1500066da663ba6d3423d1cd28fabe9 +size 268435584 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy new file mode 100644 index 0000000000000000000000000000000000000000..ea0dec811a1110086be7fc9f45181a3a237e9eb4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59f4edfb0e4a167b58b680a3550ca9b39101bc02298a845f0413a4fb0b7cfd1a +size 2176 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy new file mode 100644 index 0000000000000000000000000000000000000000..e3a343ce26525276aaf7b4a4ba8a63d7fba42efc --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65882185441ebf6e14126e97874ae56260f8b45dd48e4308d7deb41c3b61bfc6 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy new file mode 100644 index 0000000000000000000000000000000000000000..20badb522c827067b41c4a3636052864425fb81e --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6f5e0922be1bdf9ade5b79608cac6cd2eccd1cff22f07873963ff0d586b0c36 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy new file mode 100644 index 0000000000000000000000000000000000000000..57b4356561460b82e736aa2005af547093c0af67 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f70e10de47e26d2346850ad85a3f0f14fac46668c29ab6c36baf8e70f81e4a33 +size 2176 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..3c9a8f64fadacb16a331843a2b42fff90658a3e5 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad9628fee7a934a08186ab804af49e013372cf3c3a22f83ce5b11d578409d240 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy 
b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..ce8dd16efa080f516b355c83cf8ea6ecff028842 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2663a5a26c19efaf6eac13573ff6f7d96a7895c79a89c6a6336d8d0343c9ed95 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..079c1a3f0b9838e07d23b9724d56c9ec6ad718f9 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a8d269ff3848bbf9cb0f7229ab080a8edaf73617fd490b612a67b7aaaf4d2bb +size 268435584 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.norm.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.norm.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..00ce6054fd08d903750985b1045df5bf3fc3ba89 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.norm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db800b28786309d6ee30b6e7edd9976694d4e562b2f175c2c439ef0acc1dc232 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.norm.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.norm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..71a7ab1010c63cd091d6d1d4bb7e724f82451472 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/meta-llama_Llama-2-7b-hf.model.norm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12293daca489e22a19ef8ada6f03121ff9bd279131e05a187c7be7d762bf1728 +size 65664 diff --git a/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight.npy b/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..989eec0f97c26c09e6cb752139cf2362f96904cc --- /dev/null +++ b/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ebb8861ddea7aa933c9fa71282908854cd10e06bdcb7816685132a818fa70d2 +size 8388736 diff --git a/tinyllama-bf16/formatted_tensors.json b/tinyllama-bf16/formatted_tensors.json new file mode 100644 index 0000000000000000000000000000000000000000..63d561c540cbded23032c7ef98e357fa424ad37a --- /dev/null +++ b/tinyllama-bf16/formatted_tensors.json @@ -0,0 +1,131 @@ +{ + "Cheng98_TinyLlama_v1.1.model.embed_tokens.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 32000, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.embed_tokens.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 2048, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.weight.npy" + }, + 
"Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 256, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 256, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 2048, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 5632, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 5632, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 2048, + 5632 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.norm.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.model.norm.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.lm_head.weight": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 32000, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-bf16/Cheng98_TinyLlama_v1.1.lm_head.weight.npy" + } +} \ No newline at end of file diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..69b8fe3bb9208a22fed4ad1ebf3fd7b4b709a957 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:230716dcb27feebb1a176a1ced06e9196cc135559b88fc1e448c3e9f4b847639 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..d1eba867f17aa72584f68a8f6bcbdd21b9f3b309 --- /dev/null +++ 
b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18cc61da007292754ea74954a81a94212a5535061102d563380d502808cc8e6f +size 512128 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..34205e6edbf848b5bcded0df253eb5e6ca4b4ae8 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc3400a2cebdebe6701224c589db4f14e0f0af2469318dae1b0729eaad459212 +size 1048576128 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.embed_tokens.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.embed_tokens.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..50f16969819a123e4918370494a84b0ba26c51af --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.embed_tokens.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22ef22993938e13566e693e66a144eacd2937b11abab0507fa0ce04418538f09 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.embed_tokens.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.embed_tokens.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..e5fd56ce4fdfeb70267b102499f309d55d5e1c18 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.embed_tokens.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32cf3afe98a03e02b69d5fa08af2205562ece4331c1a492a29948bac02518d98 +size 1048576128 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..50f16969819a123e4918370494a84b0ba26c51af --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22ef22993938e13566e693e66a144eacd2937b11abab0507fa0ce04418538f09 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.other.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..67833e222f1b92dc1aed55721dac9365a6096de5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:583d2a7df989cc9cb0efc85e6facff28c2a5e44af3dd3929295fa20f110a3224 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..7fe6d8eadd5850657f084ebc4107e30b2d876aa5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dbc03c15481554d286a1afd2fc99e3e5838bdc46466aec9196cef19b53ba276 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..7fe6d8eadd5850657f084ebc4107e30b2d876aa5 --- /dev/null +++ 
b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dbc03c15481554d286a1afd2fc99e3e5838bdc46466aec9196cef19b53ba276 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.other.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..4c870a496c0cc8eea94cb3ee17e245234a9fa208 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8d9ec21bdabc8eb06a5fe6b975a70b92eacf8f59b196586de4b25926c58b826 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..eb344b4d5bf1bd81dc187b9ca97704d4903a398c --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a183b49f21a07076dcf8cee4cf3545ce0b2473cb0bb8ef003611af6b0165cc9d +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..50f16969819a123e4918370494a84b0ba26c51af --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22ef22993938e13566e693e66a144eacd2937b11abab0507fa0ce04418538f09 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..a5ab5b7fb454767ed85767d1863ebb31433ede3a --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcbb0f173e462b4a8b1b5a65218550d9c6323feeebefcff98585a9941002e233 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..46c8be5788738c5bec5dd0672a252d9183326f8e --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc27eb401870be65284f3933187797cc3d98328f0b58ea2d3f854c5a5673ae52 +size 90240 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..cac3df027c28684043ca4d6777b62e70b4e5e17b --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:690f2584902bd2a4e423879b3cd256e14ad1c181eb25162fc0e8504793648b3c +size 90240 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.input.npy new file mode 100644 index 
0000000000000000000000000000000000000000..347d673a3fb408b0893fc55b532ad415fe25a488 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd285690936ea5db7bda288f3b884770d35802a0190b830b9811a109dcabeefb +size 90240 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..4c870a496c0cc8eea94cb3ee17e245234a9fa208 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8d9ec21bdabc8eb06a5fe6b975a70b92eacf8f59b196586de4b25926c58b826 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..f061147ea352936a2e33610efd1efc2f6770552b --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2992c8b6f041efe75e4b9f419af051169b5942cb271652eed7fd8da8a57d4b4 +size 184549504 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..47ff586fdab069e4933c0aacec7781af148fd57b --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c21e619d9a3be1c1fc829ad87c70836a67dd94ea1b2b10784be7cec59f482830 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..46c8be5788738c5bec5dd0672a252d9183326f8e --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc27eb401870be65284f3933187797cc3d98328f0b58ea2d3f854c5a5673ae52 +size 90240 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..a988d7f7403f23715d8973e6afe14f52cabd02b8 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98793e55eb3fc6f1f67a641303ac58a8130ac270d3e292011380b621da454fc0 +size 184549504 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..cac3df027c28684043ca4d6777b62e70b4e5e17b --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:690f2584902bd2a4e423879b3cd256e14ad1c181eb25162fc0e8504793648b3c +size 90240 diff --git 
a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.other.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..be06d469117a2bcfaba186b185a8e52f0f5ea3d5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fd7e8f9feec5aa5109bdd0da4a66fa9bc910ae23144e23422f97b3c86765362 +size 90240 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..452229fad347ca3510892374a10d0974a7a9f4a4 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d46c8618a73b40d8895949f8ede69f901d11d3eeb65ad4205ca5fda75009ca6 +size 90240 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..47ff586fdab069e4933c0aacec7781af148fd57b --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c21e619d9a3be1c1fc829ad87c70836a67dd94ea1b2b10784be7cec59f482830 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..be06d469117a2bcfaba186b185a8e52f0f5ea3d5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fd7e8f9feec5aa5109bdd0da4a66fa9bc910ae23144e23422f97b3c86765362 +size 90240 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..2df9b45c855332474e003554fe028e4a805a5e0e --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dda5674048b47a3d367cb56f03b2efbfb2faff3420b660f3005d5e007d4014a3 +size 184549504 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..7fe6d8eadd5850657f084ebc4107e30b2d876aa5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dbc03c15481554d286a1afd2fc99e3e5838bdc46466aec9196cef19b53ba276 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..a9c1197897f8037b95f0cb4276b0f9390e12a5ed --- /dev/null +++ 
b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01d408cc4a4179fec43d8087d9d850625105eaec655a73035f60e532e67f563c +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..90c522f6f2c6ef35d47a26cea5b18eb38c5c08eb --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f91f84a998faf60febf5cc370068cc25087dd28529454af6895effc80f749cdf +size 640 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.other.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..e7a6f6154c615bec7cf9222810443411d2b45be8 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00c5bf2aa1142acb53a6149247078b936101e38c896987a1879d76dcd621cf3b +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..f3697b527947940fd7f82ab45a28e454c3c848ed --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90dd77d003db80eed7cac04729848492dc88e1bc29f0e131cf8d8c1e291d5122 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..c3122e0beeae395c2fbada37dbb646af1f440563 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cc68cb4895462ff42d9a6e1b5726bd511496a962bb781eeeb447826eed735e6 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.other.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..ada7f410a06c5b090319831841b7a4e77b59a33a --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:210b4e0c31bd321d8387056f3d32058293d706ceacb23e894a31a5967c019991 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..5fa0bb67cf2863ae7c20c18e898aa731cacb880e --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:37afd814d0353509fc58ab052103bbae9878b8375db9734bbf5387e6a37872ec +size 640 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_masked.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_masked.npy new file mode 100644 index 0000000000000000000000000000000000000000..59d98b81e9c594db615ad48a57532cb977dc920f --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_masked.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd2fbf7d1f88b6e3ca1751c257331f2a60dc24a023b47ffe4de19b4a7fa73b4d +size 640 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_softmaxed.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_softmaxed.npy new file mode 100644 index 0000000000000000000000000000000000000000..bae379e173c543699aee73afd50597bcb3faf549 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_softmaxed.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17cc502b3f0d8fa3f52d7c3292055a318f473fc2116c299449a3d8175e08b212 +size 640 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..2b239791c96dc9fad8617aba34ef44f87262c4d5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e41a199ff9a7da9c31516b9c419bcd7424764e386c15aecb7d373e727675f55f +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..44d57d4016ffb09bc5d18d5efd1cdb7a2de340cd --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c25d8cc8cbbd5c6eb4ecfc060f9838475660623d2b674df499503c64ac8dbaed +size 4224 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..f5ff3a19e535bebdfe63d10b7772ba5792deb86c --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c580305c915824c84132665a921438088b039cf039cc1755a09f629769e8219 +size 8388736 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..7f02e47c99c63d9c145abdd42cc166ce398501d7 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20ee35a03bab658c1808be74f1c7036bd49358b84f47a9e053a87c79aaf28f98 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.output.npy 
b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..67833e222f1b92dc1aed55721dac9365a6096de5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:583d2a7df989cc9cb0efc85e6facff28c2a5e44af3dd3929295fa20f110a3224 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..28732a533171faee8df02ed5729620ae639679dc --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a253488e548da1d49e24b1ec3bbca650da97adf2784a9e4c7ad2fe747c29cfa +size 67108992 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..67833e222f1b92dc1aed55721dac9365a6096de5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:583d2a7df989cc9cb0efc85e6facff28c2a5e44af3dd3929295fa20f110a3224 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..2b239791c96dc9fad8617aba34ef44f87262c4d5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e41a199ff9a7da9c31516b9c419bcd7424764e386c15aecb7d373e727675f55f +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..5ae63d58712e3bcd55777931e8b88bb79222db8e --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86977eaf8ae2f27ab26654afef96fa09640de70b7239fc59ddf33effeace3de0 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..fe41aaf0de652808c44f599784ca04c601d2fca3 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7a2f1ffd15e596f52538ea8b6c040714fd6dd60af0d10d658ba873f38fb3af5 +size 67108992 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.cos.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.cos.npy new file mode 100644 index 0000000000000000000000000000000000000000..0144500b2491cdc4659538d36fda14ad5fec09d1 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.cos.npy @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a8506a7675386d50c5c10a18471459fa4893a1346642b126664c1c4cd0511fc4 +size 1152 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.key_states.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.key_states.npy new file mode 100644 index 0000000000000000000000000000000000000000..34c95a3fb1ab73a24bf6f7f5e3b77417aa8e9a42 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.key_states.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:017cfc3ee8cc066450c9178e15ca2bcae1de77a615899a83b3df66e0ce86836f +size 4224 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.query_states.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.query_states.npy new file mode 100644 index 0000000000000000000000000000000000000000..e18893a0cb42585cad65e90e645a56efd9352b0e --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.query_states.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab25a11ada64a9165324f057b3fde0e79d2a6f48afe54c5d6031ac57e77888f8 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.sin.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.sin.npy new file mode 100644 index 0000000000000000000000000000000000000000..40b34fa6f41ada1f9b1a9f522474ec921e50538f --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.sin.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd7e6f44d2c2893cd9824239279faadc0a137f437c3a04f6a9f88d1001578f2f +size 1152 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..2b239791c96dc9fad8617aba34ef44f87262c4d5 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e41a199ff9a7da9c31516b9c419bcd7424764e386c15aecb7d373e727675f55f +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..fcbb1a823b05165daed47a9462b16f7963ec5d6b --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e824de64ed8fad5c5a759f4e284810a6661fc8dea0bccc25d53581cf6bd05905 +size 4224 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.weight.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..8651015931655e86d324f6254d29db476dc08d84 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a963dfc9b0371cc9c4b29272a199a9a92f6eee7a8315368209b7611f5aa819e +size 8388736 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.norm.input.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.norm.input.npy new file mode 100644 index 
0000000000000000000000000000000000000000..de84bd2072b37740041096412b3106318d1c98e4 --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.norm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b94fc62fedd96195dfe434cd41bf4edceec68b0584dc8a30be9413611a6eb619 +size 32896 diff --git a/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.norm.output.npy b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.norm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..428e079daecacb482e56b18832cd87e7354bd82e --- /dev/null +++ b/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.norm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36d138cbc19e29051a508c39844b3ff3a80d614a923556e6153f6c5f7be9e7ea +size 32896 diff --git a/tinyllama-mxint8/formatted_tensors.json b/tinyllama-mxint8/formatted_tensors.json new file mode 100644 index 0000000000000000000000000000000000000000..912bff7b777ec3d3ff1f037ab8164c96187972ab --- /dev/null +++ b/tinyllama-mxint8/formatted_tensors.json @@ -0,0 +1,722 @@ +{ + "Cheng98_TinyLlama_v1.1.model.embed_tokens.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32000, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.embed_tokens.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.embed_tokens.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.embed_tokens.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.input_layernorm.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 2048, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.q_proj.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.input.npy" + }, 
+ "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 256, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 256 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.k_proj.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 256, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 256 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.v_proj.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.cos": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 64 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.cos.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.sin": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 64 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.sin.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.query_states": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 64 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.query_states.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.key_states": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 4, + 1, + 64 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.rope.key_states.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32, + 1, + 64 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.other": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -2, + "shape": [ + 32, + 64, + 1 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.other.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.output": { + 
"tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_masked": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_masked.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_softmaxed": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_weights_softmaxed.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32, + 1, + 1 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.other": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -2, + "shape": [ + 32, + 1, + 64 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.other.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 32, + 1, + 64 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.attn_output.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 2048, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.o_proj.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.self_attn.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.add1.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.add1.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": 
"saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.other.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.add1.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add1.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.post_attention_layernorm.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 5632, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 5632 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.gate_proj.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 5632, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 5632 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.up_proj.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 5632 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 5632 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.act.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", 
+ "shape": [ + 1, + 1, + 5632 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 5632 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.other.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 5632 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.mult.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 5632 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 2048, + 5632 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.weight.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.mlp.down_proj.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.add2.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.add2.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.other.npy" + }, + "Cheng98_TinyLlama_v1.1.model.layers.0.add2.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.layers.0.add2.output.npy" + }, + "Cheng98_TinyLlama_v1.1.model.norm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.norm.input.npy" + }, + "Cheng98_TinyLlama_v1.1.model.norm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 2048 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.model.norm.output.npy" + }, + "Cheng98_TinyLlama_v1.1.lm_head.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.input.npy" + }, + "Cheng98_TinyLlama_v1.1.lm_head.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32000, + 2048 + ] + }, + "exp_mantissa": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.weight.npy" + }, + 
"Cheng98_TinyLlama_v1.1.lm_head.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 32000 + ] + }, + "hex": "saved_tensors/tinyllama-mxint8/Cheng98_TinyLlama_v1.1.lm_head.output.npy" + }, + "input_ids": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.int64", + "shape": [ + 1, + 1 + ] + }, + "int": "saved_tensors/tinyllama-mxint8/input_ids.npy" + } +} \ No newline at end of file diff --git a/tinyllama-mxint8/input_ids.npy b/tinyllama-mxint8/input_ids.npy new file mode 100644 index 0000000000000000000000000000000000000000..6593c1ffa3b9085aace5bfe3b258f2915b04c94d --- /dev/null +++ b/tinyllama-mxint8/input_ids.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eb47bb5e8322411424c1d3e4312e472aeb5cf21135a0650a14111ad8ac8b287 +size 136