maplekeng committed
Commit 8327b1f · verified · parent: 94c8782

End of training

README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-base_model: Gensyn/Qwen2.5-0.5B-Instruct
+base_model: unsloth/Qwen2.5-0.5B-Instruct
 library_name: transformers
 model_name: Qwen2.5-0.5B-Instruct-Gensyn-Swarm-sly_nimble_lemur
 tags:
@@ -14,7 +14,7 @@ licence: license
 
 # Model Card for Qwen2.5-0.5B-Instruct-Gensyn-Swarm-sly_nimble_lemur
 
-This model is a fine-tuned version of [Gensyn/Qwen2.5-0.5B-Instruct](https://huggingface.co/Gensyn/Qwen2.5-0.5B-Instruct).
+This model is a fine-tuned version of [unsloth/Qwen2.5-0.5B-Instruct](https://huggingface.co/unsloth/Qwen2.5-0.5B-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
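The "Quick start" section itself is truncated in this diff. For context, TRL-generated model cards typically ship a snippet along the lines of the sketch below; the repository id `maplekeng/Qwen2.5-0.5B-Instruct-Gensyn-Swarm-sly_nimble_lemur` is an assumption inferred from the committer and model name, not confirmed by this diff.

```python
# Minimal quick-start sketch in the style of the TRL model-card template.
# The repo id below is an assumption, not confirmed by this commit.
from transformers import pipeline

question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline(
    "text-generation",
    model="maplekeng/Qwen2.5-0.5B-Instruct-Gensyn-Swarm-sly_nimble_lemur",  # assumed repo id
    device="cuda",
)
# Chat-style input; the pipeline applies the tokenizer's chat template.
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```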
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "total_flos": 0.0,
-  "train_loss": 6.447264695452759e-06,
-  "train_runtime": 10252.1281,
-  "train_samples": 17,
-  "train_samples_per_second": 0.016,
-  "train_steps_per_second": 0.001
+  "train_loss": 1.5658140182495117e-05,
+  "train_runtime": 2348.1696,
+  "train_samples": 4,
+  "train_samples_per_second": 0.009,
+  "train_steps_per_second": 0.002
 }
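Reading the diff: the new run processed 4 training samples over roughly 39 minutes. A small sketch like the following (assuming `all_results.json` is available locally) recovers those figures from the JSON; the rounded steps-per-second value multiplied by the runtime lands near the 5 global steps recorded in trainer_state.json below.

```python
# Sketch: read the updated training summary and derive human-readable figures.
# Assumes all_results.json sits in the current working directory.
import json

with open("all_results.json") as f:
    results = json.load(f)

print(f"train runtime: {results['train_runtime'] / 60:.1f} min")          # ~39.1 min
print(f"approx. optimizer steps: "
      f"{results['train_steps_per_second'] * results['train_runtime']:.1f}")  # ~5
```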
config.json CHANGED
@@ -3,7 +3,6 @@
     "Qwen2ForCausalLM"
   ],
   "attention_dropout": 0.0,
-  "bos_token_id": 151643,
   "eos_token_id": 151645,
   "hidden_act": "silu",
   "hidden_size": 896,
@@ -15,13 +14,15 @@
   "num_attention_heads": 14,
   "num_hidden_layers": 24,
   "num_key_value_heads": 2,
+  "pad_token_id": 151654,
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "rope_theta": 1000000.0,
-  "sliding_window": 32768,
+  "sliding_window": null,
   "tie_word_embeddings": true,
   "torch_dtype": "float32",
   "transformers_version": "4.51.2",
+  "unsloth_fixed": true,
   "use_cache": true,
   "use_sliding_window": false,
   "vocab_size": 151936
generation_config.json CHANGED
@@ -5,7 +5,8 @@
     151645,
     151643
   ],
-  "pad_token_id": 151643,
+  "max_length": 32768,
+  "pad_token_id": 151654,
   "repetition_penalty": 1.1,
   "temperature": 0.7,
   "top_k": 20,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a1d1bfde27d5e6be495333e581102a6579a06b7e6a052a1a55513f1ec93bcc2f
+oid sha256:e3dd3620f46a1849ce2f350def4f07c571ec2c8b00086e326e6ec8a180f53071
 size 1976163472
special_tokens_map.json CHANGED
@@ -22,7 +22,7 @@
     "single_word": false
   },
   "pad_token": {
-    "content": "<|endoftext|>",
+    "content": "<|vision_pad|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5eee858c5123a4279c3e1f7b81247343f356ac767940b2692a928ad929543214
-size 11422063
+oid sha256:64e71213db910f5cafa86d35091f37393dcc344b1bbc34091d1b3eed4cca01d5
+size 11422064
tokenizer_config.json CHANGED
@@ -200,8 +200,9 @@
   "eos_token": "<|im_end|>",
   "errors": "replace",
   "extra_special_tokens": {},
-  "model_max_length": 131072,
-  "pad_token": "<|endoftext|>",
+  "model_max_length": 32768,
+  "pad_token": "<|vision_pad|>",
+  "padding_side": "left",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
   "unk_token": null
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "total_flos": 0.0,
-  "train_loss": 6.447264695452759e-06,
-  "train_runtime": 10252.1281,
-  "train_samples": 17,
-  "train_samples_per_second": 0.016,
-  "train_steps_per_second": 0.001
+  "train_loss": 1.5658140182495117e-05,
+  "train_runtime": 2348.1696,
+  "train_samples": 4,
+  "train_samples_per_second": 0.009,
+  "train_steps_per_second": 0.002
 }
trainer_state.json CHANGED
@@ -2,123 +2,78 @@
   "best_global_step": null,
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 3.4705882352941178,
+  "epoch": 2.5,
   "eval_steps": 500,
-  "global_step": 10,
+  "global_step": 5,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "completion_length": 129.0625,
-      "epoch": 0.9411764705882353,
-      "grad_norm": 5.772158622741699,
+      "completion_length": 126.0,
+      "epoch": 1.0,
+      "grad_norm": 17.677051544189453,
       "kl": 0.0,
       "learning_rate": 5e-07,
       "loss": -0.0,
-      "reward": 0.591631580144167,
-      "reward_std": 0.2950996853032848,
+      "reward": 0.09396408637985587,
+      "reward_std": 0.02489092040923424,
       "rewards/concensus_correctness_reward_func": 0.0,
       "rewards/consensus_reward_func": 0.0,
       "rewards/cumulative_reward_2": 0.0,
-      "rewards/final_correctness_reward_func": 0.0625,
-      "rewards/question_recreation_reward_func": 0.500725319609046,
+      "rewards/final_correctness_reward_func": 0.0,
+      "rewards/question_recreation_reward_func": 0.07896408578380942,
       "rewards/soft_format_reward_func": 0.0,
       "rewards/strict_format_reward_func": 0.0,
-      "rewards/xmlcount_reward_func": 0.02840625075623393,
+      "rewards/xmlcount_reward_func": 0.014999999664723873,
       "step": 2
     },
     {
-      "completion_length": 129.88888888888889,
-      "epoch": 1.4705882352941178,
-      "grad_norm": 4.538054943084717,
-      "kl": 0.003908961649156279,
-      "learning_rate": 4.415111107797445e-07,
+      "completion_length": 93.5,
+      "epoch": 2.0,
+      "grad_norm": 22.663196563720703,
+      "kl": 0.02060387669780539,
+      "learning_rate": 2.5e-07,
       "loss": 0.0,
-      "reward": 0.5140680852863524,
-      "reward_std": 0.10897246843928264,
+      "reward": 0.058309531887061894,
+      "reward_std": 0.02309032447374193,
       "rewards/concensus_correctness_reward_func": 0.0,
       "rewards/consensus_reward_func": 0.0,
       "rewards/cumulative_reward_2": 0.0,
       "rewards/final_correctness_reward_func": 0.0,
-      "rewards/question_recreation_reward_func": 0.49312362840606105,
+      "rewards/question_recreation_reward_func": 0.08130953146610409,
       "rewards/soft_format_reward_func": 0.0,
       "rewards/strict_format_reward_func": 0.0,
-      "rewards/xmlcount_reward_func": 0.020944445704420406,
+      "rewards/xmlcount_reward_func": -0.023000000044703484,
       "step": 4
     },
     {
-      "completion_length": 138.77777777777777,
-      "epoch": 2.0,
-      "grad_norm": 5.477197647094727,
-      "kl": 0.008041509905726545,
-      "learning_rate": 2.934120444167326e-07,
-      "loss": 0.0,
-      "reward": 0.7325452516476313,
-      "reward_std": 0.1888044167103039,
-      "rewards/concensus_correctness_reward_func": 0.0,
-      "rewards/consensus_reward_func": 0.0,
-      "rewards/cumulative_reward_2": 0.0,
-      "rewards/final_correctness_reward_func": 0.2222222222222222,
-      "rewards/question_recreation_reward_func": 0.5095452732510037,
-      "rewards/soft_format_reward_func": 0.0,
-      "rewards/strict_format_reward_func": 0.0,
-      "rewards/xmlcount_reward_func": 0.0007777793022493521,
-      "step": 6
-    },
-    {
-      "completion_length": 154.5,
-      "epoch": 2.9411764705882355,
-      "grad_norm": 4.317551136016846,
-      "kl": 0.016037766326917335,
-      "learning_rate": 1.2500000000000005e-07,
-      "loss": 0.0,
-      "reward": 0.5921503761783242,
-      "reward_std": 0.3880798064637929,
-      "rewards/concensus_correctness_reward_func": 0.0,
-      "rewards/consensus_reward_func": 0.0,
-      "rewards/cumulative_reward_2": 0.0,
-      "rewards/final_correctness_reward_func": 0.125,
-      "rewards/question_recreation_reward_func": 0.5306503740139306,
-      "rewards/soft_format_reward_func": 0.0,
-      "rewards/strict_format_reward_func": 0.0,
-      "rewards/xmlcount_reward_func": -0.0635000029578805,
-      "step": 8
-    },
-    {
-      "completion_length": 145.83333333333334,
-      "epoch": 3.4705882352941178,
-      "grad_norm": 5.180707931518555,
-      "kl": 0.016869528788245387,
-      "learning_rate": 1.507684480352292e-08,
-      "loss": 0.0,
-      "reward": 0.6629479693041908,
-      "reward_std": 0.28609387824932736,
+      "completion_length": 78.5,
+      "epoch": 2.5,
+      "kl": 0.03730800951598212,
+      "reward": 0.09967234451323748,
+      "reward_std": 0.01814919151365757,
       "rewards/concensus_correctness_reward_func": 0.0,
       "rewards/consensus_reward_func": 0.0,
       "rewards/cumulative_reward_2": 0.0,
       "rewards/final_correctness_reward_func": 0.0,
-      "rewards/question_recreation_reward_func": 0.567670188844204,
+      "rewards/question_recreation_reward_func": 0.09967234451323748,
       "rewards/soft_format_reward_func": 0.0,
       "rewards/strict_format_reward_func": 0.0,
-      "rewards/xmlcount_reward_func": 0.09527777466509077,
-      "step": 10
-    },
-    {
-      "epoch": 3.4705882352941178,
-      "step": 10,
+      "rewards/xmlcount_reward_func": 0.0,
+      "step": 5,
       "total_flos": 0.0,
-      "train_loss": 6.447264695452759e-06,
-      "train_runtime": 10252.1281,
-      "train_samples_per_second": 0.016,
-      "train_steps_per_second": 0.001
+      "train_loss": 1.5658140182495117e-05,
+      "train_runtime": 2348.1696,
+      "train_samples_per_second": 0.009,
+      "train_steps_per_second": 0.002
     }
   ],
   "logging_steps": 2,
-  "max_steps": 10,
+  "max_steps": 5,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 5,
-  "save_steps": 25,
+  "num_train_epochs": 3,
+  "save_steps": 10,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c1ae537136e5e8372083a14299b6eb3734101d7a7a4ec98e326d5b435ef1cb47
3
  size 5944
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32f80c30de5400cd309ee7238c1eeac7b9a967cb678460006d0b306cb7d16ec2
3
  size 5944