YeongminKim committed on
Commit b2eb8f8 · verified · 1 Parent(s): bdcf989

Model save

README.md CHANGED
@@ -3,16 +3,10 @@ library_name: transformers
  license: apache-2.0
  base_model: alignment-handbook/zephyr-7b-sft-full
  tags:
- - alignment-handbook
- - trl
- - dpo
- - generated_from_trainer
  - trl
  - dpo
  - alignment-handbook
  - generated_from_trainer
- datasets:
- - HuggingFaceH4/ultrafeedback_binarized
  model-index:
  - name: zephyr-7b-dpo-full
    results: []
@@ -23,17 +17,17 @@ should probably proofread and complete it, then remove this comment. -->
 
  # zephyr-7b-dpo-full
 
- This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the HuggingFaceH4/ultrafeedback_binarized dataset.
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: -0.0120
- - Rewards/chosen: -1.5839
- - Rewards/rejected: -2.6984
+ - Logits/chosen: 0.5837
+ - Logits/rejected: 1.9213
+ - Logps/chosen: -439.8712
+ - Logps/rejected: -529.8352
+ - Loss: 0.7707
  - Rewards/accuracies: 0.7857
- - Rewards/margins: 1.1145
- - Logps/rejected: -530.0449
- - Logps/chosen: -440.3677
- - Logits/rejected: 1.9295
- - Logits/chosen: 0.5950
+ - Rewards/chosen: -1.5789
+ - Rewards/margins: 1.1174
+ - Rewards/rejected: -2.6963
 
  ## Model description
 
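For reference, a minimal loading sketch for the checkpoint described in the model card above. The repository id `YeongminKim/zephyr-7b-dpo-full` is an assumption pieced together from the committer name and the `model-index` entry (it is not stated in this diff), and the chat-template call assumes the tokenizer inherits the template of the `alignment-handbook/zephyr-7b-sft-full` base model.

```python
# Minimal sketch (not part of the commit): loading the saved DPO model for inference.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "YeongminKim/zephyr-7b-dpo-full"  # assumed; replace with the actual repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# Assumes the tokenizer carries the zephyr chat template from the SFT base model.
messages = [{"role": "user", "content": "Summarize what DPO fine-tuning does."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```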
all_results.json CHANGED
@@ -15,8 +15,8 @@
  "eval_steps_per_second": 0.144,
  "total_flos": 0.0,
  "train_loss": 0.0,
- "train_runtime": 2.6585,
+ "train_runtime": 2.3316,
  "train_samples": 61134,
- "train_samples_per_second": 22996.052,
- "train_steps_per_second": 359.231
+ "train_samples_per_second": 26219.362,
+ "train_steps_per_second": 409.584
  }
config.json CHANGED
@@ -22,6 +22,6 @@
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.44.2",
- "use_cache": true,
+ "use_cache": false,
  "vocab_size": 32000
  }
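The only config change is `use_cache` flipping to `false`, the value the trainer writes out when the KV cache is disabled during training (typically because gradient checkpointing is on; the training arguments are not shown in this diff, so that reading is an assumption). For inference you would normally re-enable the cache, e.g.:

```python
# Minimal sketch: override the saved use_cache=false at load time.
# Extra kwargs passed to from_pretrained update the loaded config.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "YeongminKim/zephyr-7b-dpo-full",  # assumed repo id, as above
    use_cache=True,                    # speeds up autoregressive generation
)
assert model.config.use_cache
```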
runs/Apr19_07-04-57_aai-a1003/events.out.tfevents.1745014052.aai-a1003.181396.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4da894af2a05a653f506470b3d7c66bcea371f94fd8b2b4d13590d7619e8753
+ size 6518
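The added file is a Git LFS pointer to a TensorBoard event log. A minimal sketch for downloading it and listing its scalar tags is below, assuming the same (unconfirmed) repo id as above and that `huggingface_hub` and `tensorboard` are installed.

```python
# Minimal sketch: fetch the event file added in this commit and list its logged scalars.
from huggingface_hub import hf_hub_download
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

path = hf_hub_download(
    repo_id="YeongminKim/zephyr-7b-dpo-full",  # assumed repo id
    filename="runs/Apr19_07-04-57_aai-a1003/events.out.tfevents.1745014052.aai-a1003.181396.0",
)

events = EventAccumulator(path)
events.Reload()
print(events.Tags()["scalars"])  # names of the logged scalar series
```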
train_results.json CHANGED
@@ -2,8 +2,8 @@
  "epoch": 0.9994767137624281,
  "total_flos": 0.0,
  "train_loss": 0.0,
- "train_runtime": 2.6585,
+ "train_runtime": 2.3316,
  "train_samples": 61134,
- "train_samples_per_second": 22996.052,
- "train_steps_per_second": 359.231
+ "train_samples_per_second": 26219.362,
+ "train_steps_per_second": 409.584
  }
trainer_state.json CHANGED
@@ -1597,9 +1597,9 @@
  "step": 955,
  "total_flos": 0.0,
  "train_loss": 0.0,
- "train_runtime": 2.6585,
- "train_samples_per_second": 22996.052,
- "train_steps_per_second": 359.231
+ "train_runtime": 2.3316,
+ "train_samples_per_second": 26219.362,
+ "train_steps_per_second": 409.584
  }
  ],
  "logging_steps": 10,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e6fe0039cff9858c99511576bf5982134bf7265a708760f709aebe2dd79c8cd
+ oid sha256:91e9dad5d87e89f75b8c5a80d2e3aaf2884c3f9d7093b8bb5753e99d9d555184
  size 7480