chansung committed on
Commit ef5bf85 · verified · 1 parent: 7f08e5c

Model save

Files changed (4)
  1. README.md +2 -4
  2. all_results.json +3 -3
  3. train_results.json +3 -3
  4. trainer_state.json +21 -7
README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: coolcui/DeepSeek-R1-Distill-Qwen-1.5B-GRPO
-datasets: chansung/verifiable-coding-problems-python
 library_name: transformers
 model_name: Qwen2.5-1.5B-CCRL-1
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - grpo
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for Qwen2.5-1.5B-CCRL-1
 
-This model is a fine-tuned version of [coolcui/DeepSeek-R1-Distill-Qwen-1.5B-GRPO](https://huggingface.co/coolcui/DeepSeek-R1-Distill-Qwen-1.5B-GRPO) on the [chansung/verifiable-coding-problems-python](https://huggingface.co/datasets/chansung/verifiable-coding-problems-python) dataset.
+This model is a fine-tuned version of [coolcui/DeepSeek-R1-Distill-Qwen-1.5B-GRPO](https://huggingface.co/coolcui/DeepSeek-R1-Distill-Qwen-1.5B-GRPO).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -29,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chansung18/huggingface/runs/opz13onj)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chansung18/huggingface/runs/oyhi6d8w)
 
 
 This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
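Note: the README's Quick start section is unchanged in this commit (only its `print(output["generated_text"])` line appears as hunk context). For orientation, a minimal sketch of loading a model like this with a transformers text-generation pipeline follows; the repo id `chansung/Qwen2.5-1.5B-CCRL-1` and the prompt are assumptions for illustration, not the card's exact snippet.

```python
# Minimal usage sketch (not the card's exact Quick start code).
# The model id below is an assumption based on the committer and model_name.
from transformers import pipeline

generator = pipeline("text-generation", model="chansung/Qwen2.5-1.5B-CCRL-1")

# Illustrative prompt only.
prompt = "Write a Python function that reverses a string."
output = generator(prompt, max_new_tokens=256, return_full_text=False)[0]
print(output["generated_text"])
```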
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
     "train_loss": 0.0,
-    "train_runtime": 13359.4688,
+    "train_runtime": 295.3751,
     "train_samples": 949,
-    "train_samples_per_second": 0.479,
-    "train_steps_per_second": 0.004
+    "train_samples_per_second": 21.667,
+    "train_steps_per_second": 0.169
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
     "train_loss": 0.0,
-    "train_runtime": 13359.4688,
+    "train_runtime": 295.3751,
     "train_samples": 949,
-    "train_samples_per_second": 0.479,
-    "train_steps_per_second": 0.004
+    "train_samples_per_second": 21.667,
+    "train_steps_per_second": 0.169
 }
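Note: all_results.json and train_results.json receive identical summary numbers in this commit (the much shorter runtime suggests a short, resumed run). A quick sketch for inspecting them from a local clone; the file paths are illustrative.

```python
# Sketch: load both metric files from a local clone and compare them.
# (Paths are illustrative; both files are updated identically in this commit.)
import json

with open("all_results.json") as f:
    all_results = json.load(f)
with open("train_results.json") as f:
    train_results = json.load(f)

assert all_results == train_results  # same summary in both files here

print(f"runtime: {all_results['train_runtime']:.1f}s")
print(f"samples/sec: {all_results['train_samples_per_second']}")
print(f"steps/sec: {all_results['train_steps_per_second']}")
```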
trainer_state.json CHANGED
@@ -2,9 +2,9 @@
   "best_global_step": null,
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.8403361344537815,
+  "epoch": 0.8571428571428571,
   "eval_steps": 500,
-  "global_step": 50,
+  "global_step": 51,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -710,13 +710,27 @@
       "step": 50
     },
     {
-      "epoch": 0.8403361344537815,
-      "step": 50,
+      "clip_ratio": 0.0,
+      "completion_length": 1704.546875,
+      "epoch": 0.8571428571428571,
+      "grad_norm": 0.0,
+      "kl": 0.0,
+      "learning_rate": 0.0,
+      "loss": 0.0,
+      "num_tokens": 281542.0,
+      "reward": 0.0,
+      "reward_std": 0.0,
+      "rewards/curriculum_aware_reward_fn": 0.0,
+      "step": 51
+    },
+    {
+      "epoch": 0.8571428571428571,
+      "step": 51,
       "total_flos": 0.0,
       "train_loss": 0.0,
-      "train_runtime": 13359.4688,
-      "train_samples_per_second": 0.479,
-      "train_steps_per_second": 0.004
+      "train_runtime": 295.3751,
+      "train_samples_per_second": 21.667,
+      "train_steps_per_second": 0.169
     }
   ],
   "logging_steps": 1,