cwaud committed
Commit d7f431b · verified · 1 Parent(s): 1c1d1d9

Training in progress, step 50

adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "unsloth/Llama-3.2-1B-Instruct",
+  "base_model_name_or_path": "unsloth/Llama-3.2-3B-Instruct",
   "bias": "none",
   "fan_in_fan_out": null,
   "inference_mode": true,
@@ -20,12 +20,12 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "v_proj",
-    "gate_proj",
     "k_proj",
+    "gate_proj",
     "up_proj",
     "o_proj",
+    "v_proj",
+    "q_proj",
     "down_proj"
   ],
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c3f769ff1e4ba4c98c03a624ed01a33e854004d0f07165933ad20c79838391d
-size 45118424
+oid sha256:ef850e00f6ddccf7a92baced38593af32bacbc7deb92d8d8407aa4aa0408de1a
+size 97307544
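
The adapter roughly doubles in size (45,118,424 → 97,307,544 bytes), which is consistent with the 1B → 3B geometry change alone. A rough sketch, assuming the LoRA rank and the seven target modules are unchanged between the two checkpoints and weights are stored in 16-bit:

```python
# LoRA parameter count scales with sum(in_dim + out_dim) over target
# modules, times the number of layers; the ratio between the two base
# models should therefore match the file-size ratio (assumptions: same
# rank, same seven target modules, 16-bit storage).
def lora_units(hidden, kv_dim, inter, layers):
    per_layer = (
        (hidden + hidden)   # q_proj
        + (hidden + kv_dim) # k_proj
        + (hidden + kv_dim) # v_proj
        + (hidden + hidden) # o_proj
        + (hidden + inter)  # gate_proj
        + (hidden + inter)  # up_proj
        + (inter + hidden)  # down_proj
    )
    return per_layer * layers

old = lora_units(2048, 512, 8192, 16)   # 1B geometry (kv_dim = 8 kv heads * 64)
new = lora_units(3072, 1024, 8192, 28)  # 3B geometry (kv_dim = 8 kv heads * 128)
print(new / old)            # ~2.16
print(97307544 / 45118424)  # ~2.16, matching the size jump above
```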
config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "unsloth/Llama-3.2-1B-Instruct",
+  "_name_or_path": "unsloth/Llama-3.2-3B-Instruct",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -8,16 +8,16 @@
   "attention_dropout": 0.0,
   "bos_token_id": 128000,
   "eos_token_id": 128009,
-  "head_dim": 64,
+  "head_dim": 128,
   "hidden_act": "silu",
-  "hidden_size": 2048,
+  "hidden_size": 3072,
   "initializer_range": 0.02,
   "intermediate_size": 8192,
   "max_position_embeddings": 131072,
   "mlp_bias": false,
   "model_type": "llama",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 16,
+  "num_attention_heads": 24,
+  "num_hidden_layers": 28,
   "num_key_value_heads": 8,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:149f14fb729cab896091dbb95460bb98759373752ba272a6ae1f5cde47ccec9e
+oid sha256:2c10884f4be4e91add9795afd80f7b25e35f322adac12d6b37b6d44aa702d50c
 size 6776