Yukang committed on
Commit
c91757c
·
verified ·
1 Parent(s): cb882e9

Model save

Browse files
Files changed (6) hide show
  1. README.md +2 -4
  2. all_results.json +3 -3
  3. config.json +1 -1
  4. train_results.json +3 -3
  5. trainer_state.json +11 -11
  6. training_args.bin +1 -1
README.md CHANGED
@@ -1,11 +1,9 @@
1
  ---
2
  base_model: Qwen/Qwen2.5-7B-Instruct
3
- datasets: open-r1/OpenR1-Math-220k
4
  library_name: transformers
5
  model_name: Qwen2.5-7B-Open-R1-GRPO
6
  tags:
7
  - generated_from_trainer
8
- - open-r1
9
  - trl
10
  - grpo
11
  licence: license
@@ -13,7 +11,7 @@ licence: license
13
 
14
  # Model Card for Qwen2.5-7B-Open-R1-GRPO
15
 
16
- This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on the [open-r1/OpenR1-Math-220k](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k) dataset.
17
  It has been trained using [TRL](https://github.com/huggingface/trl).
18
 
19
  ## Quick start
@@ -29,7 +27,7 @@ print(output["generated_text"])
29
 
30
  ## Training procedure
31
 
32
- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chenyukang2020-nvidia/huggingface/runs/7lknxmxa)
33
 
34
 
35
  This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
 
1
  ---
2
  base_model: Qwen/Qwen2.5-7B-Instruct
 
3
  library_name: transformers
4
  model_name: Qwen2.5-7B-Open-R1-GRPO
5
  tags:
6
  - generated_from_trainer
 
7
  - trl
8
  - grpo
9
  licence: license
 
11
 
12
  # Model Card for Qwen2.5-7B-Open-R1-GRPO
13
 
14
+ This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct).
15
  It has been trained using [TRL](https://github.com/huggingface/trl).
16
 
17
  ## Quick start
 
27
 
28
  ## Training procedure
29
 
30
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chenyukang2020-nvidia/huggingface/runs/n1iuzt67)
31
 
32
 
33
  This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
all_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "total_flos": 0.0,
3
  "train_loss": 3.769922398664146e-06,
4
- "train_runtime": 502.782,
5
  "train_samples": 93733,
6
- "train_samples_per_second": 186.429,
7
- "train_steps_per_second": 11.653
8
  }
 
1
  {
2
  "total_flos": 0.0,
3
  "train_loss": 3.769922398664146e-06,
4
+ "train_runtime": 505.2607,
5
  "train_samples": 93733,
6
+ "train_samples_per_second": 185.514,
7
+ "train_steps_per_second": 11.596
8
  }
config.json CHANGED
@@ -22,7 +22,7 @@
22
  "tie_word_embeddings": false,
23
  "torch_dtype": "bfloat16",
24
  "transformers_version": "4.52.3",
25
- "use_cache": true,
26
  "use_sliding_window": false,
27
  "vocab_size": 152064
28
  }
 
22
  "tie_word_embeddings": false,
23
  "torch_dtype": "bfloat16",
24
  "transformers_version": "4.52.3",
25
+ "use_cache": false,
26
  "use_sliding_window": false,
27
  "vocab_size": 152064
28
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "total_flos": 0.0,
3
  "train_loss": 3.769922398664146e-06,
4
- "train_runtime": 502.782,
5
  "train_samples": 93733,
6
- "train_samples_per_second": 186.429,
7
- "train_steps_per_second": 11.653
8
  }
 
1
  {
2
  "total_flos": 0.0,
3
  "train_loss": 3.769922398664146e-06,
4
+ "train_runtime": 505.2607,
5
  "train_samples": 93733,
6
+ "train_samples_per_second": 185.514,
7
+ "train_steps_per_second": 11.596
8
  }
trainer_state.json CHANGED
@@ -175524,7 +175524,7 @@
175524
  "completions/min_terminated_length": 0.0,
175525
  "epoch": 0.9987198088247845,
175526
  "frac_reward_zero_std": 1.0,
175527
- "grad_norm": 6.168677994468242e-10,
175528
  "kl": 0.071533203125,
175529
  "learning_rate": 1.4376004014216017e-10,
175530
  "loss": 0.0029,
@@ -175554,7 +175554,7 @@
175554
  "completions/min_terminated_length": 0.0,
175555
  "epoch": 0.9988905009814799,
175556
  "frac_reward_zero_std": 1.0,
175557
- "grad_norm": 5.005282155356922e-10,
175558
  "kl": 0.06787109375,
175559
  "learning_rate": 1.1358823698404131e-10,
175560
  "loss": 0.0027,
@@ -175584,7 +175584,7 @@
175584
  "completions/min_terminated_length": 0.0,
175585
  "epoch": 0.9990611931381753,
175586
  "frac_reward_zero_std": 1.0,
175587
- "grad_norm": 4.5292041093372395e-10,
175588
  "kl": 0.0650634765625,
175589
  "learning_rate": 8.696603252866808e-11,
175590
  "loss": 0.0026,
@@ -175614,7 +175614,7 @@
175614
  "completions/min_terminated_length": 0.0,
175615
  "epoch": 0.9992318852948707,
175616
  "frac_reward_zero_std": 1.0,
175617
- "grad_norm": 6.505299056700279e-10,
175618
  "kl": 0.072265625,
175619
  "learning_rate": 6.389343622403844e-11,
175620
  "loss": 0.0029,
@@ -175644,7 +175644,7 @@
175644
  "completions/min_terminated_length": 0.0,
175645
  "epoch": 0.9994025774515661,
175646
  "frac_reward_zero_std": 1.0,
175647
- "grad_norm": 7.184587540539436e-10,
175648
  "kl": 0.06884765625,
175649
  "learning_rate": 4.437045625915737e-11,
175650
  "loss": 0.0028,
@@ -175674,7 +175674,7 @@
175674
  "completions/min_terminated_length": 0.0,
175675
  "epoch": 0.9995732696082615,
175676
  "frac_reward_zero_std": 1.0,
175677
- "grad_norm": 5.548114154084975e-10,
175678
  "kl": 0.0714111328125,
175679
  "learning_rate": 2.839709956625747e-11,
175680
  "loss": 0.0029,
@@ -175704,7 +175704,7 @@
175704
  "completions/min_terminated_length": 0.0,
175705
  "epoch": 0.999743961764957,
175706
  "frac_reward_zero_std": 1.0,
175707
- "grad_norm": 4.3193985862669656e-10,
175708
  "kl": 0.0662841796875,
175709
  "learning_rate": 1.5973371813027273e-11,
175710
  "loss": 0.0027,
@@ -175734,7 +175734,7 @@
175734
  "completions/min_terminated_length": 0.0,
175735
  "epoch": 0.9999146539216524,
175736
  "frac_reward_zero_std": 1.0,
175737
- "grad_norm": 4.106144063560871e-10,
175738
  "kl": 0.0687255859375,
175739
  "learning_rate": 7.099277411493077e-12,
175740
  "loss": 0.0028,
@@ -175754,9 +175754,9 @@
175754
  "step": 5858,
175755
  "total_flos": 0.0,
175756
  "train_loss": 3.769922398664146e-06,
175757
- "train_runtime": 502.782,
175758
- "train_samples_per_second": 186.429,
175759
- "train_steps_per_second": 11.653
175760
  }
175761
  ],
175762
  "logging_steps": 1,
 
175524
  "completions/min_terminated_length": 0.0,
175525
  "epoch": 0.9987198088247845,
175526
  "frac_reward_zero_std": 1.0,
175527
+ "grad_norm": 6.168677684106501e-10,
175528
  "kl": 0.071533203125,
175529
  "learning_rate": 1.4376004014216017e-10,
175530
  "loss": 0.0029,
 
175554
  "completions/min_terminated_length": 0.0,
175555
  "epoch": 0.9988905009814799,
175556
  "frac_reward_zero_std": 1.0,
175557
+ "grad_norm": 5.00527050853811e-10,
175558
  "kl": 0.06787109375,
175559
  "learning_rate": 1.1358823698404131e-10,
175560
  "loss": 0.0027,
 
175584
  "completions/min_terminated_length": 0.0,
175585
  "epoch": 0.9990611931381753,
175586
  "frac_reward_zero_std": 1.0,
175587
+ "grad_norm": 4.529207300217758e-10,
175588
  "kl": 0.0650634765625,
175589
  "learning_rate": 8.696603252866808e-11,
175590
  "loss": 0.0026,
 
175614
  "completions/min_terminated_length": 0.0,
175615
  "epoch": 0.9992318852948707,
175616
  "frac_reward_zero_std": 1.0,
175617
+ "grad_norm": 6.505254899019415e-10,
175618
  "kl": 0.072265625,
175619
  "learning_rate": 6.389343622403844e-11,
175620
  "loss": 0.0029,
 
175644
  "completions/min_terminated_length": 0.0,
175645
  "epoch": 0.9994025774515661,
175646
  "frac_reward_zero_std": 1.0,
175647
+ "grad_norm": 7.184588305303324e-10,
175648
  "kl": 0.06884765625,
175649
  "learning_rate": 4.437045625915737e-11,
175650
  "loss": 0.0028,
 
175674
  "completions/min_terminated_length": 0.0,
175675
  "epoch": 0.9995732696082615,
175676
  "frac_reward_zero_std": 1.0,
175677
+ "grad_norm": 5.548119591819174e-10,
175678
  "kl": 0.0714111328125,
175679
  "learning_rate": 2.839709956625747e-11,
175680
  "loss": 0.0029,
 
175704
  "completions/min_terminated_length": 0.0,
175705
  "epoch": 0.999743961764957,
175706
  "frac_reward_zero_std": 1.0,
175707
+ "grad_norm": 4.3193996079624006e-10,
175708
  "kl": 0.0662841796875,
175709
  "learning_rate": 1.5973371813027273e-11,
175710
  "loss": 0.0027,
 
175734
  "completions/min_terminated_length": 0.0,
175735
  "epoch": 0.9999146539216524,
175736
  "frac_reward_zero_std": 1.0,
175737
+ "grad_norm": 4.10613893427095e-10,
175738
  "kl": 0.0687255859375,
175739
  "learning_rate": 7.099277411493077e-12,
175740
  "loss": 0.0028,
 
175754
  "step": 5858,
175755
  "total_flos": 0.0,
175756
  "train_loss": 3.769922398664146e-06,
175757
+ "train_runtime": 505.2607,
175758
+ "train_samples_per_second": 185.514,
175759
+ "train_steps_per_second": 11.596
175760
  }
175761
  ],
175762
  "logging_steps": 1,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:63904ccc51b3f0c21106f66d629c7a09825a8a38ec9cd75c9fbc015dc398b196
3
  size 8440
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20297eedde9646fca6464e949b57008f04406ed98f5c4b86718e3f405dd728c2
3
  size 8440