Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +6 -0
- .gitignore +4 -0
- LICENSE +21 -0
- LLM_training.ipynb +704 -0
- Llama3_1_(8B)_Alpaca.ipynb +0 -0
- Methords/TFIDF.py +53 -0
- Phase1/Albert.csv +0 -0
- Phase1/Albert_full.csv +0 -0
- Phase1/Combined.csv +3 -0
- Phase1/Combined_full.csv +3 -0
- Phase1/GloVe.csv +0 -0
- Phase1/GloVe_full.csv +0 -0
- Phase1/TFIDF.csv +0 -0
- Phase1/TFIDF_full.csv +0 -0
- Phase1/Word2Vec.csv +0 -0
- Phase1/Word2Vec_full.csv +0 -0
- all_combined_data.csv +2 -2
- all_combined_data.csv.1 +3 -0
- dataset_explore.ipynb +0 -0
- ensemble_phase1.ipynb +1767 -0
- major-mid-report.pdf +3 -0
- models.py +172 -0
- multi-label-processing.ipynb +0 -0
- multi_label_models.py +94 -0
- outputs/checkpoint-60/README.md +202 -0
- outputs/checkpoint-60/adapter_config.json +39 -0
- outputs/checkpoint-60/adapter_model.safetensors +3 -0
- outputs/checkpoint-60/optimizer.pt +3 -0
- outputs/checkpoint-60/rng_state.pth +3 -0
- outputs/checkpoint-60/scheduler.pt +3 -0
- outputs/checkpoint-60/special_tokens_map.json +23 -0
- outputs/checkpoint-60/tokenizer.json +3 -0
- outputs/checkpoint-60/tokenizer_config.json +2066 -0
- outputs/checkpoint-60/trainer_state.json +454 -0
- outputs/checkpoint-60/training_args.bin +3 -0
- phase_1_ensemble.ipynb +243 -0
- preprocessing.ipynb +0 -0
- problem.pdf +3 -0
- requirements.txt +102 -0
- unsloth-requirements.txt +0 -0
- unsloth_compiled_cache/UnslothBCOTrainer.py +1818 -0
- unsloth_compiled_cache/UnslothCPOTrainer.py +1551 -0
- unsloth_compiled_cache/UnslothDPOTrainer.py +0 -0
- unsloth_compiled_cache/UnslothGKDTrainer.py +857 -0
- unsloth_compiled_cache/UnslothGRPOTrainer.py +1432 -0
- unsloth_compiled_cache/UnslothKTOTrainer.py +1834 -0
- unsloth_compiled_cache/UnslothNashMDTrainer.py +949 -0
- unsloth_compiled_cache/UnslothORPOTrainer.py +1537 -0
- unsloth_compiled_cache/UnslothOnlineDPOTrainer.py +1263 -0
- unsloth_compiled_cache/UnslothPPOTrainer.py +1253 -0
.gitattributes
CHANGED
|
@@ -67,3 +67,9 @@ stack_exchange/IS/Information[[:space:]]Security[[:space:]]2016.csv filter=lfs d
|
|
| 67 |
stack_exchange/IS/merged_data.csv filter=lfs diff=lfs merge=lfs -text
|
| 68 |
stack_exchange/SE/merged_data.csv filter=lfs diff=lfs merge=lfs -text
|
| 69 |
stack_exchange/combined_data.csv filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
stack_exchange/IS/merged_data.csv filter=lfs diff=lfs merge=lfs -text
|
| 68 |
stack_exchange/SE/merged_data.csv filter=lfs diff=lfs merge=lfs -text
|
| 69 |
stack_exchange/combined_data.csv filter=lfs diff=lfs merge=lfs -text
|
| 70 |
+
Phase1/Combined.csv filter=lfs diff=lfs merge=lfs -text
|
| 71 |
+
Phase1/Combined_full.csv filter=lfs diff=lfs merge=lfs -text
|
| 72 |
+
all_combined_data.csv.1 filter=lfs diff=lfs merge=lfs -text
|
| 73 |
+
major-mid-report.pdf filter=lfs diff=lfs merge=lfs -text
|
| 74 |
+
outputs/checkpoint-60/tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
| 75 |
+
problem.pdf filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
venv/
|
| 2 |
+
*.pyc
|
| 3 |
+
__pycache__/
|
| 4 |
+
data/
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Ayush Gupta
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
LLM_training.ipynb
ADDED
|
@@ -0,0 +1,704 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 6,
|
| 6 |
+
"id": "7e1651a1",
|
| 7 |
+
"metadata": {},
|
| 8 |
+
"outputs": [
|
| 9 |
+
{
|
| 10 |
+
"data": {
|
| 11 |
+
"text/html": [
|
| 12 |
+
"<div>\n",
|
| 13 |
+
"<style scoped>\n",
|
| 14 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 15 |
+
" vertical-align: middle;\n",
|
| 16 |
+
" }\n",
|
| 17 |
+
"\n",
|
| 18 |
+
" .dataframe tbody tr th {\n",
|
| 19 |
+
" vertical-align: top;\n",
|
| 20 |
+
" }\n",
|
| 21 |
+
"\n",
|
| 22 |
+
" .dataframe thead th {\n",
|
| 23 |
+
" text-align: right;\n",
|
| 24 |
+
" }\n",
|
| 25 |
+
"</style>\n",
|
| 26 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 27 |
+
" <thead>\n",
|
| 28 |
+
" <tr style=\"text-align: right;\">\n",
|
| 29 |
+
" <th></th>\n",
|
| 30 |
+
" <th>Input</th>\n",
|
| 31 |
+
" <th>Tags</th>\n",
|
| 32 |
+
" </tr>\n",
|
| 33 |
+
" </thead>\n",
|
| 34 |
+
" <tbody>\n",
|
| 35 |
+
" <tr>\n",
|
| 36 |
+
" <th>0</th>\n",
|
| 37 |
+
" <td>Title: What is the effective differencial effe...</td>\n",
|
| 38 |
+
" <td>['electronics']</td>\n",
|
| 39 |
+
" </tr>\n",
|
| 40 |
+
" <tr>\n",
|
| 41 |
+
" <th>1</th>\n",
|
| 42 |
+
" <td>Title: Heat sensor with fan cooling Body: Can ...</td>\n",
|
| 43 |
+
" <td>['electronics']</td>\n",
|
| 44 |
+
" </tr>\n",
|
| 45 |
+
" <tr>\n",
|
| 46 |
+
" <th>2</th>\n",
|
| 47 |
+
" <td>Title: Outlet Installation--more wires than my...</td>\n",
|
| 48 |
+
" <td>['electronics']</td>\n",
|
| 49 |
+
" </tr>\n",
|
| 50 |
+
" <tr>\n",
|
| 51 |
+
" <th>3</th>\n",
|
| 52 |
+
" <td>Title: Buck Converter Operation Question Body:...</td>\n",
|
| 53 |
+
" <td>['electronics']</td>\n",
|
| 54 |
+
" </tr>\n",
|
| 55 |
+
" <tr>\n",
|
| 56 |
+
" <th>4</th>\n",
|
| 57 |
+
" <td>Title: Urgent help in area of ASIC design, ver...</td>\n",
|
| 58 |
+
" <td>['electronics']</td>\n",
|
| 59 |
+
" </tr>\n",
|
| 60 |
+
" </tbody>\n",
|
| 61 |
+
"</table>\n",
|
| 62 |
+
"</div>"
|
| 63 |
+
],
|
| 64 |
+
"text/plain": [
|
| 65 |
+
" Input Tags\n",
|
| 66 |
+
"0 Title: What is the effective differencial effe... ['electronics']\n",
|
| 67 |
+
"1 Title: Heat sensor with fan cooling Body: Can ... ['electronics']\n",
|
| 68 |
+
"2 Title: Outlet Installation--more wires than my... ['electronics']\n",
|
| 69 |
+
"3 Title: Buck Converter Operation Question Body:... ['electronics']\n",
|
| 70 |
+
"4 Title: Urgent help in area of ASIC design, ver... ['electronics']"
|
| 71 |
+
]
|
| 72 |
+
},
|
| 73 |
+
"execution_count": 6,
|
| 74 |
+
"metadata": {},
|
| 75 |
+
"output_type": "execute_result"
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"source": [
|
| 79 |
+
"import pandas as pd\n",
|
| 80 |
+
"\n",
|
| 81 |
+
"# Read the CSV file\n",
|
| 82 |
+
"data = pd.read_csv('data/all_combined_data.csv')\n",
|
| 83 |
+
"\n",
|
| 84 |
+
"# Display the first few rows of the data\n",
|
| 85 |
+
"data.head()"
|
| 86 |
+
]
|
| 87 |
+
},
|
| 88 |
+
{
|
| 89 |
+
"cell_type": "code",
|
| 90 |
+
"execution_count": 7,
|
| 91 |
+
"id": "c9057558",
|
| 92 |
+
"metadata": {},
|
| 93 |
+
"outputs": [
|
| 94 |
+
{
|
| 95 |
+
"data": {
|
| 96 |
+
"text/html": [
|
| 97 |
+
"<div>\n",
|
| 98 |
+
"<style scoped>\n",
|
| 99 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 100 |
+
" vertical-align: middle;\n",
|
| 101 |
+
" }\n",
|
| 102 |
+
"\n",
|
| 103 |
+
" .dataframe tbody tr th {\n",
|
| 104 |
+
" vertical-align: top;\n",
|
| 105 |
+
" }\n",
|
| 106 |
+
"\n",
|
| 107 |
+
" .dataframe thead th {\n",
|
| 108 |
+
" text-align: right;\n",
|
| 109 |
+
" }\n",
|
| 110 |
+
"</style>\n",
|
| 111 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 112 |
+
" <thead>\n",
|
| 113 |
+
" <tr style=\"text-align: right;\">\n",
|
| 114 |
+
" <th></th>\n",
|
| 115 |
+
" <th>Input</th>\n",
|
| 116 |
+
" <th>Tags</th>\n",
|
| 117 |
+
" </tr>\n",
|
| 118 |
+
" </thead>\n",
|
| 119 |
+
" <tbody>\n",
|
| 120 |
+
" <tr>\n",
|
| 121 |
+
" <th>162648</th>\n",
|
| 122 |
+
" <td>Title: Is there an optimal strategy for placi...</td>\n",
|
| 123 |
+
" <td>['algorithms', 'efficiency']</td>\n",
|
| 124 |
+
" </tr>\n",
|
| 125 |
+
" <tr>\n",
|
| 126 |
+
" <th>162649</th>\n",
|
| 127 |
+
" <td>Title: Handling Events Across Multi-Server Ap...</td>\n",
|
| 128 |
+
" <td>['c#', 'application-design']</td>\n",
|
| 129 |
+
" </tr>\n",
|
| 130 |
+
" <tr>\n",
|
| 131 |
+
" <th>162650</th>\n",
|
| 132 |
+
" <td>Title: Is it good practice to use try catch f...</td>\n",
|
| 133 |
+
" <td>['design', 'engineering']</td>\n",
|
| 134 |
+
" </tr>\n",
|
| 135 |
+
" <tr>\n",
|
| 136 |
+
" <th>162651</th>\n",
|
| 137 |
+
" <td>Title: What do you call a process which trans...</td>\n",
|
| 138 |
+
" <td>['python', 'naming', 'parsing', 'serialization']</td>\n",
|
| 139 |
+
" </tr>\n",
|
| 140 |
+
" <tr>\n",
|
| 141 |
+
" <th>162652</th>\n",
|
| 142 |
+
" <td>Title: Testing : Why is it necessary?Body: He...</td>\n",
|
| 143 |
+
" <td>['testing']</td>\n",
|
| 144 |
+
" </tr>\n",
|
| 145 |
+
" </tbody>\n",
|
| 146 |
+
"</table>\n",
|
| 147 |
+
"</div>"
|
| 148 |
+
],
|
| 149 |
+
"text/plain": [
|
| 150 |
+
" Input \\\n",
|
| 151 |
+
"162648 Title: Is there an optimal strategy for placi... \n",
|
| 152 |
+
"162649 Title: Handling Events Across Multi-Server Ap... \n",
|
| 153 |
+
"162650 Title: Is it good practice to use try catch f... \n",
|
| 154 |
+
"162651 Title: What do you call a process which trans... \n",
|
| 155 |
+
"162652 Title: Testing : Why is it necessary?Body: He... \n",
|
| 156 |
+
"\n",
|
| 157 |
+
" Tags \n",
|
| 158 |
+
"162648 ['algorithms', 'efficiency'] \n",
|
| 159 |
+
"162649 ['c#', 'application-design'] \n",
|
| 160 |
+
"162650 ['design', 'engineering'] \n",
|
| 161 |
+
"162651 ['python', 'naming', 'parsing', 'serialization'] \n",
|
| 162 |
+
"162652 ['testing'] "
|
| 163 |
+
]
|
| 164 |
+
},
|
| 165 |
+
"execution_count": 7,
|
| 166 |
+
"metadata": {},
|
| 167 |
+
"output_type": "execute_result"
|
| 168 |
+
}
|
| 169 |
+
],
|
| 170 |
+
"source": [
|
| 171 |
+
"data.tail()"
|
| 172 |
+
]
|
| 173 |
+
},
|
| 174 |
+
{
|
| 175 |
+
"cell_type": "code",
|
| 176 |
+
"execution_count": null,
|
| 177 |
+
"id": "297d09db",
|
| 178 |
+
"metadata": {},
|
| 179 |
+
"outputs": [],
|
| 180 |
+
"source": [
|
| 181 |
+
"data['Tags'] = data['Tags'].apply(lambda tags: tags + ['ignore'] if len(tags) == 1 else tags)"
|
| 182 |
+
]
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"cell_type": "code",
|
| 186 |
+
"execution_count": 5,
|
| 187 |
+
"id": "e3d5ffe0",
|
| 188 |
+
"metadata": {},
|
| 189 |
+
"outputs": [
|
| 190 |
+
{
|
| 191 |
+
"data": {
|
| 192 |
+
"text/html": [
|
| 193 |
+
"<div>\n",
|
| 194 |
+
"<style scoped>\n",
|
| 195 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 196 |
+
" vertical-align: middle;\n",
|
| 197 |
+
" }\n",
|
| 198 |
+
"\n",
|
| 199 |
+
" .dataframe tbody tr th {\n",
|
| 200 |
+
" vertical-align: top;\n",
|
| 201 |
+
" }\n",
|
| 202 |
+
"\n",
|
| 203 |
+
" .dataframe thead th {\n",
|
| 204 |
+
" text-align: right;\n",
|
| 205 |
+
" }\n",
|
| 206 |
+
"</style>\n",
|
| 207 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 208 |
+
" <thead>\n",
|
| 209 |
+
" <tr style=\"text-align: right;\">\n",
|
| 210 |
+
" <th></th>\n",
|
| 211 |
+
" <th>Input</th>\n",
|
| 212 |
+
" <th>Tags</th>\n",
|
| 213 |
+
" </tr>\n",
|
| 214 |
+
" </thead>\n",
|
| 215 |
+
" <tbody>\n",
|
| 216 |
+
" <tr>\n",
|
| 217 |
+
" <th>0</th>\n",
|
| 218 |
+
" <td>Title: What is the effective differencial effe...</td>\n",
|
| 219 |
+
" <td>['electronics']</td>\n",
|
| 220 |
+
" </tr>\n",
|
| 221 |
+
" <tr>\n",
|
| 222 |
+
" <th>1</th>\n",
|
| 223 |
+
" <td>Title: Heat sensor with fan cooling Body: Can ...</td>\n",
|
| 224 |
+
" <td>['electronics']</td>\n",
|
| 225 |
+
" </tr>\n",
|
| 226 |
+
" <tr>\n",
|
| 227 |
+
" <th>2</th>\n",
|
| 228 |
+
" <td>Title: Outlet Installation--more wires than my...</td>\n",
|
| 229 |
+
" <td>['electronics']</td>\n",
|
| 230 |
+
" </tr>\n",
|
| 231 |
+
" <tr>\n",
|
| 232 |
+
" <th>3</th>\n",
|
| 233 |
+
" <td>Title: Buck Converter Operation Question Body:...</td>\n",
|
| 234 |
+
" <td>['electronics']</td>\n",
|
| 235 |
+
" </tr>\n",
|
| 236 |
+
" <tr>\n",
|
| 237 |
+
" <th>4</th>\n",
|
| 238 |
+
" <td>Title: Urgent help in area of ASIC design, ver...</td>\n",
|
| 239 |
+
" <td>['electronics']</td>\n",
|
| 240 |
+
" </tr>\n",
|
| 241 |
+
" </tbody>\n",
|
| 242 |
+
"</table>\n",
|
| 243 |
+
"</div>"
|
| 244 |
+
],
|
| 245 |
+
"text/plain": [
|
| 246 |
+
" Input Tags\n",
|
| 247 |
+
"0 Title: What is the effective differencial effe... ['electronics']\n",
|
| 248 |
+
"1 Title: Heat sensor with fan cooling Body: Can ... ['electronics']\n",
|
| 249 |
+
"2 Title: Outlet Installation--more wires than my... ['electronics']\n",
|
| 250 |
+
"3 Title: Buck Converter Operation Question Body:... ['electronics']\n",
|
| 251 |
+
"4 Title: Urgent help in area of ASIC design, ver... ['electronics']"
|
| 252 |
+
]
|
| 253 |
+
},
|
| 254 |
+
"execution_count": 5,
|
| 255 |
+
"metadata": {},
|
| 256 |
+
"output_type": "execute_result"
|
| 257 |
+
}
|
| 258 |
+
],
|
| 259 |
+
"source": [
|
| 260 |
+
"data.head()"
|
| 261 |
+
]
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"cell_type": "code",
|
| 265 |
+
"execution_count": 13,
|
| 266 |
+
"id": "9a1cba07",
|
| 267 |
+
"metadata": {},
|
| 268 |
+
"outputs": [
|
| 269 |
+
{
|
| 270 |
+
"data": {
|
| 271 |
+
"text/html": [
|
| 272 |
+
"<div>\n",
|
| 273 |
+
"<style scoped>\n",
|
| 274 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 275 |
+
" vertical-align: middle;\n",
|
| 276 |
+
" }\n",
|
| 277 |
+
"\n",
|
| 278 |
+
" .dataframe tbody tr th {\n",
|
| 279 |
+
" vertical-align: top;\n",
|
| 280 |
+
" }\n",
|
| 281 |
+
"\n",
|
| 282 |
+
" .dataframe thead th {\n",
|
| 283 |
+
" text-align: right;\n",
|
| 284 |
+
" }\n",
|
| 285 |
+
"</style>\n",
|
| 286 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 287 |
+
" <thead>\n",
|
| 288 |
+
" <tr style=\"text-align: right;\">\n",
|
| 289 |
+
" <th></th>\n",
|
| 290 |
+
" <th>Input</th>\n",
|
| 291 |
+
" <th>Tags</th>\n",
|
| 292 |
+
" </tr>\n",
|
| 293 |
+
" </thead>\n",
|
| 294 |
+
" <tbody>\n",
|
| 295 |
+
" <tr>\n",
|
| 296 |
+
" <th>0</th>\n",
|
| 297 |
+
" <td>Title: What is the effective differencial effe...</td>\n",
|
| 298 |
+
" <td>[electronics, ignore]</td>\n",
|
| 299 |
+
" </tr>\n",
|
| 300 |
+
" <tr>\n",
|
| 301 |
+
" <th>1</th>\n",
|
| 302 |
+
" <td>Title: Heat sensor with fan cooling Body: Can ...</td>\n",
|
| 303 |
+
" <td>[electronics, ignore]</td>\n",
|
| 304 |
+
" </tr>\n",
|
| 305 |
+
" <tr>\n",
|
| 306 |
+
" <th>2</th>\n",
|
| 307 |
+
" <td>Title: Outlet Installation--more wires than my...</td>\n",
|
| 308 |
+
" <td>[electronics, ignore]</td>\n",
|
| 309 |
+
" </tr>\n",
|
| 310 |
+
" <tr>\n",
|
| 311 |
+
" <th>3</th>\n",
|
| 312 |
+
" <td>Title: Buck Converter Operation Question Body:...</td>\n",
|
| 313 |
+
" <td>[electronics, ignore]</td>\n",
|
| 314 |
+
" </tr>\n",
|
| 315 |
+
" <tr>\n",
|
| 316 |
+
" <th>4</th>\n",
|
| 317 |
+
" <td>Title: Urgent help in area of ASIC design, ver...</td>\n",
|
| 318 |
+
" <td>[electronics, ignore]</td>\n",
|
| 319 |
+
" </tr>\n",
|
| 320 |
+
" </tbody>\n",
|
| 321 |
+
"</table>\n",
|
| 322 |
+
"</div>"
|
| 323 |
+
],
|
| 324 |
+
"text/plain": [
|
| 325 |
+
" Input Tags\n",
|
| 326 |
+
"0 Title: What is the effective differencial effe... [electronics, ignore]\n",
|
| 327 |
+
"1 Title: Heat sensor with fan cooling Body: Can ... [electronics, ignore]\n",
|
| 328 |
+
"2 Title: Outlet Installation--more wires than my... [electronics, ignore]\n",
|
| 329 |
+
"3 Title: Buck Converter Operation Question Body:... [electronics, ignore]\n",
|
| 330 |
+
"4 Title: Urgent help in area of ASIC design, ver... [electronics, ignore]"
|
| 331 |
+
]
|
| 332 |
+
},
|
| 333 |
+
"execution_count": 13,
|
| 334 |
+
"metadata": {},
|
| 335 |
+
"output_type": "execute_result"
|
| 336 |
+
}
|
| 337 |
+
],
|
| 338 |
+
"source": [
|
| 339 |
+
"data.head()"
|
| 340 |
+
]
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"cell_type": "code",
|
| 344 |
+
"execution_count": null,
|
| 345 |
+
"id": "6b6a6eba",
|
| 346 |
+
"metadata": {},
|
| 347 |
+
"outputs": [
|
| 348 |
+
{
|
| 349 |
+
"ename": "NameError",
|
| 350 |
+
"evalue": "name 'data' is not defined",
|
| 351 |
+
"output_type": "error",
|
| 352 |
+
"traceback": [
|
| 353 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
| 354 |
+
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
|
| 355 |
+
"Cell \u001b[0;32mIn[1], line 9\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01msklearn\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodel_selection\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m train_test_split\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01msklearn\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmetrics\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m classification_report\n\u001b[0;32m----> 9\u001b[0m df \u001b[38;5;241m=\u001b[39m \u001b[43mdata\u001b[49m[[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mInput\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTags\u001b[39m\u001b[38;5;124m'\u001b[39m]]\u001b[38;5;241m.\u001b[39mcopy()\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mast\u001b[39;00m\n\u001b[1;32m 12\u001b[0m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTags\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m df[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTags\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mapply(ast\u001b[38;5;241m.\u001b[39mliteral_eval)\n",
|
| 356 |
+
"\u001b[0;31mNameError\u001b[0m: name 'data' is not defined"
|
| 357 |
+
]
|
| 358 |
+
}
|
| 359 |
+
],
|
| 360 |
+
"source": [
|
| 361 |
+
"from sklearn.preprocessing import MultiLabelBinarizer\n",
|
| 362 |
+
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
|
| 363 |
+
"from sklearn.ensemble import RandomForestClassifier\n",
|
| 364 |
+
"from sklearn.multioutput import MultiOutputClassifier\n",
|
| 365 |
+
"from sklearn.model_selection import train_test_split\n",
|
| 366 |
+
"from sklearn.metrics import classification_report\n",
|
| 367 |
+
"\n",
|
| 368 |
+
"\n",
|
| 369 |
+
"df = data[['Input', 'Tags']].copy()\n",
|
| 370 |
+
"import ast\n",
|
| 371 |
+
"\n",
|
| 372 |
+
"df[\"Tags\"] = df[\"Tags\"].apply(ast.literal_eval)\n",
|
| 373 |
+
"\n",
|
| 374 |
+
"# Step 1: One-hot encode the tags\n",
|
| 375 |
+
"mlb = MultiLabelBinarizer()\n",
|
| 376 |
+
"y = mlb.fit_transform(df[\"Tags\"])\n",
|
| 377 |
+
"\n",
|
| 378 |
+
"# Step 2: TF-IDF on Input column\n",
|
| 379 |
+
"tfidf = TfidfVectorizer(max_features=5000)\n",
|
| 380 |
+
"X = tfidf.fit_transform(df[\"Input\"])\n",
|
| 381 |
+
"\n",
|
| 382 |
+
"# Step 3: Train-test split\n",
|
| 383 |
+
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
|
| 384 |
+
"\n",
|
| 385 |
+
"# Step 4: Train Random Forest in MultiOutputClassifier\n",
|
| 386 |
+
"rf = RandomForestClassifier(n_estimators=100, random_state=42)\n",
|
| 387 |
+
"multi_rf = MultiOutputClassifier(rf)\n",
|
| 388 |
+
"multi_rf.fit(X_train, y_train)\n",
|
| 389 |
+
"\n",
|
| 390 |
+
"# Step 5: Predict and evaluate\n",
|
| 391 |
+
"y_pred = multi_rf.predict(X_test)\n",
|
| 392 |
+
"print(classification_report(y_test, y_pred, target_names=mlb.classes_))\n",
|
| 393 |
+
"# Step 6: Save the model\n",
|
| 394 |
+
"import joblib\n",
|
| 395 |
+
"joblib.dump(multi_rf, 'multi_rf_model.pkl')\n",
|
| 396 |
+
"# Step 7: Save the TF-IDF vectorizer\n",
|
| 397 |
+
"joblib.dump(tfidf, 'tfidf_vectorizer.pkl')\n",
|
| 398 |
+
"# Step 8: Save the label binarizer\n",
|
| 399 |
+
"joblib.dump(mlb, 'mlb.pkl')\n",
|
| 400 |
+
"# Step 9: Load the model and vectorizer\n",
|
| 401 |
+
"loaded_model = joblib.load('multi_rf_model.pkl')\n",
|
| 402 |
+
"loaded_vectorizer = joblib.load('tfidf_vectorizer.pkl')\n",
|
| 403 |
+
"loaded_mlb = joblib.load('mlb.pkl')\n",
|
| 404 |
+
"\n"
|
| 405 |
+
]
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"cell_type": "code",
|
| 409 |
+
"execution_count": null,
|
| 410 |
+
"id": "9a11d48f",
|
| 411 |
+
"metadata": {},
|
| 412 |
+
"outputs": [],
|
| 413 |
+
"source": []
|
| 414 |
+
},
|
| 415 |
+
{
|
| 416 |
+
"cell_type": "code",
|
| 417 |
+
"execution_count": 27,
|
| 418 |
+
"id": "6312830d",
|
| 419 |
+
"metadata": {},
|
| 420 |
+
"outputs": [
|
| 421 |
+
{
|
| 422 |
+
"name": "stdout",
|
| 423 |
+
"output_type": "stream",
|
| 424 |
+
"text": [
|
| 425 |
+
"Decoded tags: ('electronics',)\n"
|
| 426 |
+
]
|
| 427 |
+
}
|
| 428 |
+
],
|
| 429 |
+
"source": [
|
| 430 |
+
"import numpy as np\n",
|
| 431 |
+
"\n",
|
| 432 |
+
"print(\"Decoded tags:\", mlb.inverse_transform(np.array([y[0]]))[0])\n"
|
| 433 |
+
]
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"cell_type": "code",
|
| 437 |
+
"execution_count": 21,
|
| 438 |
+
"id": "bba02bd6",
|
| 439 |
+
"metadata": {},
|
| 440 |
+
"outputs": [],
|
| 441 |
+
"source": [
|
| 442 |
+
"import ast\n",
|
| 443 |
+
"\n",
|
| 444 |
+
"df[\"Tags\"] = df[\"Tags\"].apply(ast.literal_eval)\n"
|
| 445 |
+
]
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"cell_type": "code",
|
| 449 |
+
"execution_count": 22,
|
| 450 |
+
"id": "260ee444",
|
| 451 |
+
"metadata": {},
|
| 452 |
+
"outputs": [
|
| 453 |
+
{
|
| 454 |
+
"name": "stdout",
|
| 455 |
+
"output_type": "stream",
|
| 456 |
+
"text": [
|
| 457 |
+
"0 [electronics]\n",
|
| 458 |
+
"1 [electronics]\n",
|
| 459 |
+
"2 [electronics]\n",
|
| 460 |
+
"3 [electronics]\n",
|
| 461 |
+
"4 [electronics]\n",
|
| 462 |
+
"Name: Tags, dtype: object\n",
|
| 463 |
+
"<class 'list'>\n"
|
| 464 |
+
]
|
| 465 |
+
}
|
| 466 |
+
],
|
| 467 |
+
"source": [
|
| 468 |
+
"print(df[\"Tags\"].head())\n",
|
| 469 |
+
"print(type(df[\"Tags\"].iloc[0]))\n"
|
| 470 |
+
]
|
| 471 |
+
},
|
| 472 |
+
{
|
| 473 |
+
"cell_type": "code",
|
| 474 |
+
"execution_count": 23,
|
| 475 |
+
"id": "61f2205b",
|
| 476 |
+
"metadata": {},
|
| 477 |
+
"outputs": [],
|
| 478 |
+
"source": [
|
| 479 |
+
"y = mlb.fit_transform(df[\"Tags\"])\n"
|
| 480 |
+
]
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"cell_type": "code",
|
| 484 |
+
"execution_count": 24,
|
| 485 |
+
"id": "b6edbea6",
|
| 486 |
+
"metadata": {},
|
| 487 |
+
"outputs": [
|
| 488 |
+
{
|
| 489 |
+
"name": "stdout",
|
| 490 |
+
"output_type": "stream",
|
| 491 |
+
"text": [
|
| 492 |
+
"Decoded tags: ('electronics',)\n"
|
| 493 |
+
]
|
| 494 |
+
}
|
| 495 |
+
],
|
| 496 |
+
"source": [
|
| 497 |
+
"print(\"Decoded tags:\", mlb.inverse_transform(np.array([y[0]]))[0])\n"
|
| 498 |
+
]
|
| 499 |
+
},
|
| 500 |
+
{
|
| 501 |
+
"cell_type": "code",
|
| 502 |
+
"execution_count": 26,
|
| 503 |
+
"id": "60b80231",
|
| 504 |
+
"metadata": {},
|
| 505 |
+
"outputs": [
|
| 506 |
+
{
|
| 507 |
+
"data": {
|
| 508 |
+
"text/plain": [
|
| 509 |
+
"array([0, 0, 0, ..., 0, 0, 0], shape=(13158,))"
|
| 510 |
+
]
|
| 511 |
+
},
|
| 512 |
+
"execution_count": 26,
|
| 513 |
+
"metadata": {},
|
| 514 |
+
"output_type": "execute_result"
|
| 515 |
+
}
|
| 516 |
+
],
|
| 517 |
+
"source": [
|
| 518 |
+
"y[0]"
|
| 519 |
+
]
|
| 520 |
+
},
|
| 521 |
+
{
|
| 522 |
+
"cell_type": "code",
|
| 523 |
+
"execution_count": 25,
|
| 524 |
+
"id": "30981b76",
|
| 525 |
+
"metadata": {},
|
| 526 |
+
"outputs": [
|
| 527 |
+
{
|
| 528 |
+
"data": {
|
| 529 |
+
"text/plain": [
|
| 530 |
+
"Input object\n",
|
| 531 |
+
"Tags object\n",
|
| 532 |
+
"dtype: object"
|
| 533 |
+
]
|
| 534 |
+
},
|
| 535 |
+
"execution_count": 25,
|
| 536 |
+
"metadata": {},
|
| 537 |
+
"output_type": "execute_result"
|
| 538 |
+
}
|
| 539 |
+
],
|
| 540 |
+
"source": [
|
| 541 |
+
"data.dtypes"
|
| 542 |
+
]
|
| 543 |
+
},
|
| 544 |
+
{
|
| 545 |
+
"cell_type": "code",
|
| 546 |
+
"execution_count": 9,
|
| 547 |
+
"id": "132c7bfc",
|
| 548 |
+
"metadata": {},
|
| 549 |
+
"outputs": [
|
| 550 |
+
{
|
| 551 |
+
"name": "stdout",
|
| 552 |
+
"output_type": "stream",
|
| 553 |
+
"text": [
|
| 554 |
+
"Average Accuracy: 0.9140289373926052\n"
|
| 555 |
+
]
|
| 556 |
+
}
|
| 557 |
+
],
|
| 558 |
+
"source": [
|
| 559 |
+
"from sklearn.metrics import accuracy_score\n",
|
| 560 |
+
"\n",
|
| 561 |
+
"# Calculate accuracy for each label and then average\n",
|
| 562 |
+
"accuracies = [accuracy_score(y_test[:, i], y_pred[:, i]) for i in range(y_test.shape[1])]\n",
|
| 563 |
+
"average_accuracy = sum(accuracies) / len(accuracies)\n",
|
| 564 |
+
"\n",
|
| 565 |
+
"print(f\"Average Accuracy: {average_accuracy}\")"
|
| 566 |
+
]
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"cell_type": "code",
|
| 570 |
+
"execution_count": 14,
|
| 571 |
+
"id": "e09ede12",
|
| 572 |
+
"metadata": {},
|
| 573 |
+
"outputs": [
|
| 574 |
+
{
|
| 575 |
+
"name": "stdout",
|
| 576 |
+
"output_type": "stream",
|
| 577 |
+
"text": [
|
| 578 |
+
"Empty DataFrame\n",
|
| 579 |
+
"Columns: [Input, Tags]\n",
|
| 580 |
+
"Index: []\n"
|
| 581 |
+
]
|
| 582 |
+
}
|
| 583 |
+
],
|
| 584 |
+
"source": [
|
| 585 |
+
"single_tag_rows = df[df['Tags'].apply(len) <= 3]\n",
|
| 586 |
+
"print(single_tag_rows)"
|
| 587 |
+
]
|
| 588 |
+
},
|
| 589 |
+
{
|
| 590 |
+
"cell_type": "code",
|
| 591 |
+
"execution_count": 1,
|
| 592 |
+
"id": "644cd3c3",
|
| 593 |
+
"metadata": {},
|
| 594 |
+
"outputs": [
|
| 595 |
+
{
|
| 596 |
+
"name": "stdout",
|
| 597 |
+
"output_type": "stream",
|
| 598 |
+
"text": [
|
| 599 |
+
" precision recall f1-score support\n",
|
| 600 |
+
"\n",
|
| 601 |
+
" 0 0.35 0.67 0.46 9\n",
|
| 602 |
+
" 1 1.00 0.09 0.17 11\n",
|
| 603 |
+
" 2 0.91 0.71 0.80 14\n",
|
| 604 |
+
"\n",
|
| 605 |
+
" micro avg 0.59 0.50 0.54 34\n",
|
| 606 |
+
" macro avg 0.75 0.49 0.48 34\n",
|
| 607 |
+
"weighted avg 0.79 0.50 0.51 34\n",
|
| 608 |
+
" samples avg 0.51 0.48 0.45 34\n",
|
| 609 |
+
"\n"
|
| 610 |
+
]
|
| 611 |
+
},
|
| 612 |
+
{
|
| 613 |
+
"name": "stderr",
|
| 614 |
+
"output_type": "stream",
|
| 615 |
+
"text": [
|
| 616 |
+
"/home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1565: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in samples with no predicted labels. Use `zero_division` parameter to control this behavior.\n",
|
| 617 |
+
" _warn_prf(average, modifier, f\"{metric.capitalize()} is\", len(result))\n",
|
| 618 |
+
"/home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1565: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in samples with no true labels. Use `zero_division` parameter to control this behavior.\n",
|
| 619 |
+
" _warn_prf(average, modifier, f\"{metric.capitalize()} is\", len(result))\n"
|
| 620 |
+
]
|
| 621 |
+
}
|
| 622 |
+
],
|
| 623 |
+
"source": [
|
| 624 |
+
"from sklearn.ensemble import RandomForestClassifier\n",
|
| 625 |
+
"from sklearn.multioutput import MultiOutputClassifier\n",
|
| 626 |
+
"from sklearn.model_selection import train_test_split\n",
|
| 627 |
+
"from sklearn.metrics import classification_report\n",
|
| 628 |
+
"import numpy as np\n",
|
| 629 |
+
"\n",
|
| 630 |
+
"# Example: X has 100 samples, 20 features; y has 100 samples, 3 possible labels per sample\n",
|
| 631 |
+
"X = np.random.rand(100, 20)\n",
|
| 632 |
+
"y = np.random.randint(2, size=(100, 3)) # Binary presence/absence for 3 labels\n",
|
| 633 |
+
"\n",
|
| 634 |
+
"# Train-test split\n",
|
| 635 |
+
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
|
| 636 |
+
"\n",
|
| 637 |
+
"# Initialize base RandomForest classifier\n",
|
| 638 |
+
"rf = RandomForestClassifier(n_estimators=100, random_state=42)\n",
|
| 639 |
+
"\n",
|
| 640 |
+
"# Wrap it for multi-label classification\n",
|
| 641 |
+
"multi_rf = MultiOutputClassifier(rf)\n",
|
| 642 |
+
"\n",
|
| 643 |
+
"# Train the model\n",
|
| 644 |
+
"multi_rf.fit(X_train, y_train)\n",
|
| 645 |
+
"\n",
|
| 646 |
+
"# Predict\n",
|
| 647 |
+
"y_pred = multi_rf.predict(X_test)\n",
|
| 648 |
+
"\n",
|
| 649 |
+
"# Evaluate\n",
|
| 650 |
+
"print(classification_report(y_test, y_pred))\n"
|
| 651 |
+
]
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"cell_type": "code",
|
| 655 |
+
"execution_count": 4,
|
| 656 |
+
"id": "32e4d5bc",
|
| 657 |
+
"metadata": {},
|
| 658 |
+
"outputs": [
|
| 659 |
+
{
|
| 660 |
+
"data": {
|
| 661 |
+
"text/plain": [
|
| 662 |
+
"array([0, 1, 1])"
|
| 663 |
+
]
|
| 664 |
+
},
|
| 665 |
+
"execution_count": 4,
|
| 666 |
+
"metadata": {},
|
| 667 |
+
"output_type": "execute_result"
|
| 668 |
+
}
|
| 669 |
+
],
|
| 670 |
+
"source": [
|
| 671 |
+
"y_train[3]"
|
| 672 |
+
]
|
| 673 |
+
},
|
| 674 |
+
{
|
| 675 |
+
"cell_type": "code",
|
| 676 |
+
"execution_count": null,
|
| 677 |
+
"id": "a2477b56",
|
| 678 |
+
"metadata": {},
|
| 679 |
+
"outputs": [],
|
| 680 |
+
"source": []
|
| 681 |
+
}
|
| 682 |
+
],
|
| 683 |
+
"metadata": {
|
| 684 |
+
"kernelspec": {
|
| 685 |
+
"display_name": "major02",
|
| 686 |
+
"language": "python",
|
| 687 |
+
"name": "python3"
|
| 688 |
+
},
|
| 689 |
+
"language_info": {
|
| 690 |
+
"codemirror_mode": {
|
| 691 |
+
"name": "ipython",
|
| 692 |
+
"version": 3
|
| 693 |
+
},
|
| 694 |
+
"file_extension": ".py",
|
| 695 |
+
"mimetype": "text/x-python",
|
| 696 |
+
"name": "python",
|
| 697 |
+
"nbconvert_exporter": "python",
|
| 698 |
+
"pygments_lexer": "ipython3",
|
| 699 |
+
"version": "3.10.12"
|
| 700 |
+
}
|
| 701 |
+
},
|
| 702 |
+
"nbformat": 4,
|
| 703 |
+
"nbformat_minor": 5
|
| 704 |
+
}
|
Llama3_1_(8B)_Alpaca.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Methords/TFIDF.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import re
|
| 4 |
+
import nltk
|
| 5 |
+
from nltk.corpus import stopwords
|
| 6 |
+
from nltk.tokenize import word_tokenize
|
| 7 |
+
from nltk.stem import WordNetLemmatizer
|
| 8 |
+
|
| 9 |
+
# Download necessary NLTK resources
|
| 10 |
+
# nltk.download('stopwords')
|
| 11 |
+
# nltk.download('punkt')
|
| 12 |
+
# nltk.download('wordnet')
|
| 13 |
+
|
| 14 |
+
# Read the CSV file
|
| 15 |
+
file_path = '/home/darth/#/SEQuestionClassifier/data/all_combined_data.csv'
|
| 16 |
+
df = pd.read_csv(file_path)
|
| 17 |
+
|
| 18 |
+
import ast
|
| 19 |
+
df["Tags"] = df["Tags"].apply(ast.literal_eval)
|
| 20 |
+
|
| 21 |
+
lemmatizer = WordNetLemmatizer()
|
| 22 |
+
stop_words = set(stopwords.words('english'))
|
| 23 |
+
|
| 24 |
+
def preprocess_text(text):
|
| 25 |
+
"""Function to clean text and perform lemitisation"""
|
| 26 |
+
text = text.lower()
|
| 27 |
+
text = re.sub(r'[^\w\s]', '', text)
|
| 28 |
+
words = word_tokenize(text)
|
| 29 |
+
words = [lemmatizer.lemmatize(word) for word in words if word not in stop_words]
|
| 30 |
+
return " ".join(words)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
from sklearn.feature_extraction.text import TfidfVectorizer
|
| 34 |
+
from sklearn.preprocessing import LabelEncoder
|
| 35 |
+
from sklearn.preprocessing import MultiLabelBinarizer
|
| 36 |
+
|
| 37 |
+
def vectorirse_text(text):
|
| 38 |
+
""" Recieves text as input and returns TF-IDF vectors"""
|
| 39 |
+
text = text.apply(preprocess_text)
|
| 40 |
+
tfidf = TfidfVectorizer(max_features=500000)
|
| 41 |
+
X = tfidf.fit_transform(text)
|
| 42 |
+
return X
|
| 43 |
+
|
| 44 |
+
def label_encoding(input):
|
| 45 |
+
mlb = MultiLabelBinarizer()
|
| 46 |
+
return mlb.fit_transform(input)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
X = vectorirse_text(df['Input'])
|
| 50 |
+
y = label_encoding(df['Tags'])
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
|
Phase1/Albert.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Phase1/Albert_full.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Phase1/Combined.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:91d244e21d42b9026b784510dd08cac0287828053e0177259c5fe4935d8ad54a
|
| 3 |
+
size 24834935
|
Phase1/Combined_full.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b343bbc3c288ac26557ffceb8a8bff0b9d760832bea2493341e277bf6135b2a9
|
| 3 |
+
size 24798095
|
Phase1/GloVe.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Phase1/GloVe_full.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Phase1/TFIDF.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Phase1/TFIDF_full.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Phase1/Word2Vec.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Phase1/Word2Vec_full.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
all_combined_data.csv
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d0d3216e994475f803750a0364a3c76ba4b52bda114360e8fab5390c73657f9b
|
| 3 |
+
size 166700687
|
all_combined_data.csv.1
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:29cfef6c46dcd2826d4ca866c576fb5f806be5fa79d292c6f5b9f7542c117053
|
| 3 |
+
size 167805959
|
dataset_explore.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ensemble_phase1.ipynb
ADDED
|
@@ -0,0 +1,1767 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 28,
|
| 6 |
+
"id": "a7a23542",
|
| 7 |
+
"metadata": {},
|
| 8 |
+
"outputs": [],
|
| 9 |
+
"source": [
|
| 10 |
+
"import pandas as pd \n",
|
| 11 |
+
"dataset = pd.read_csv(\"data/hackerank/combined_data.csv\")\n",
|
| 12 |
+
"import ast\n",
|
| 13 |
+
"\n",
|
| 14 |
+
"def clean_tags(tag_string):\n",
|
| 15 |
+
" # Convert the string to a list\n",
|
| 16 |
+
" tag_list = ast.literal_eval(tag_string)\n",
|
| 17 |
+
" # Join the list into a comma-separated string\n",
|
| 18 |
+
" return ', '.join(tag_list)\n",
|
| 19 |
+
"\n",
|
| 20 |
+
"dataset['Tags']=dataset['Tags'].apply(clean_tags)"
|
| 21 |
+
]
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"cell_type": "code",
|
| 25 |
+
"execution_count": 29,
|
| 26 |
+
"id": "da4491c7",
|
| 27 |
+
"metadata": {},
|
| 28 |
+
"outputs": [],
|
| 29 |
+
"source": [
|
| 30 |
+
"df = dataset"
|
| 31 |
+
]
|
| 32 |
+
},
|
| 33 |
+
{
|
| 34 |
+
"cell_type": "code",
|
| 35 |
+
"execution_count": 30,
|
| 36 |
+
"id": "362c543a",
|
| 37 |
+
"metadata": {},
|
| 38 |
+
"outputs": [],
|
| 39 |
+
"source": [
|
| 40 |
+
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
|
| 41 |
+
"from sklearn.preprocessing import LabelEncoder\n",
|
| 42 |
+
"\n",
|
| 43 |
+
"def vectorirse_text(text):\n",
|
| 44 |
+
" \"\"\" Recieves text as input and returns TF-IDF vectors\"\"\"\n",
|
| 45 |
+
" tfidf = TfidfVectorizer(max_features=500000)\n",
|
| 46 |
+
" X = tfidf.fit_transform(text)\n",
|
| 47 |
+
" return X\n",
|
| 48 |
+
"\n",
|
| 49 |
+
"def label_encoding(input):\n",
|
| 50 |
+
" label_encoder = LabelEncoder()\n",
|
| 51 |
+
" return label_encoder.fit_transform(input)\n",
|
| 52 |
+
"\n",
|
| 53 |
+
"\n",
|
| 54 |
+
"X = vectorirse_text(df['Input'])\n",
|
| 55 |
+
"y = label_encoding(df['Tags'])"
|
| 56 |
+
]
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"cell_type": "code",
|
| 60 |
+
"execution_count": 31,
|
| 61 |
+
"id": "b86c2a03",
|
| 62 |
+
"metadata": {},
|
| 63 |
+
"outputs": [
|
| 64 |
+
{
|
| 65 |
+
"name": "stdout",
|
| 66 |
+
"output_type": "stream",
|
| 67 |
+
"text": [
|
| 68 |
+
"✅ Accuracy on Test Set: 0.9124\n",
|
| 69 |
+
"\n",
|
| 70 |
+
"✅ Classification Report on Test Set:\n",
|
| 71 |
+
"\n",
|
| 72 |
+
" precision recall f1-score support\n",
|
| 73 |
+
"\n",
|
| 74 |
+
" 0 0.95 0.90 0.92 792\n",
|
| 75 |
+
" 1 0.86 0.85 0.85 709\n",
|
| 76 |
+
" 2 0.92 0.93 0.92 729\n",
|
| 77 |
+
" 3 0.94 0.94 0.94 830\n",
|
| 78 |
+
" 4 0.92 0.86 0.89 486\n",
|
| 79 |
+
" 5 0.94 0.94 0.94 680\n",
|
| 80 |
+
" 6 0.94 0.99 0.96 803\n",
|
| 81 |
+
" 7 0.85 0.88 0.86 634\n",
|
| 82 |
+
" 8 0.83 0.85 0.84 704\n",
|
| 83 |
+
" 9 0.97 0.95 0.96 684\n",
|
| 84 |
+
"\n",
|
| 85 |
+
" accuracy 0.91 7051\n",
|
| 86 |
+
" macro avg 0.91 0.91 0.91 7051\n",
|
| 87 |
+
"weighted avg 0.91 0.91 0.91 7051\n",
|
| 88 |
+
"\n",
|
| 89 |
+
"✅ Confusion Matrix on Test Set:\n",
|
| 90 |
+
"\n",
|
| 91 |
+
"[[711 22 8 4 0 7 4 22 12 2]\n",
|
| 92 |
+
" [ 12 604 8 3 2 10 5 12 51 2]\n",
|
| 93 |
+
" [ 7 2 679 5 7 7 8 8 6 0]\n",
|
| 94 |
+
" [ 1 6 4 783 16 5 2 5 6 2]\n",
|
| 95 |
+
" [ 1 6 10 21 418 1 4 4 17 4]\n",
|
| 96 |
+
" [ 5 10 8 1 0 639 11 3 3 0]\n",
|
| 97 |
+
" [ 0 0 6 0 0 3 793 1 0 0]\n",
|
| 98 |
+
" [ 6 26 7 4 1 0 11 557 19 3]\n",
|
| 99 |
+
" [ 7 27 10 11 4 4 4 36 597 4]\n",
|
| 100 |
+
" [ 0 1 1 4 4 3 1 11 7 652]]\n",
|
| 101 |
+
"✅ Saved probabilities to Phase1/TFIDF.csv successfully!\n"
|
| 102 |
+
]
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"data": {
|
| 106 |
+
"text/html": [
|
| 107 |
+
"<div>\n",
|
| 108 |
+
"<style scoped>\n",
|
| 109 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 110 |
+
" vertical-align: middle;\n",
|
| 111 |
+
" }\n",
|
| 112 |
+
"\n",
|
| 113 |
+
" .dataframe tbody tr th {\n",
|
| 114 |
+
" vertical-align: top;\n",
|
| 115 |
+
" }\n",
|
| 116 |
+
"\n",
|
| 117 |
+
" .dataframe thead th {\n",
|
| 118 |
+
" text-align: right;\n",
|
| 119 |
+
" }\n",
|
| 120 |
+
"</style>\n",
|
| 121 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 122 |
+
" <thead>\n",
|
| 123 |
+
" <tr style=\"text-align: right;\">\n",
|
| 124 |
+
" <th></th>\n",
|
| 125 |
+
" <th>y_true</th>\n",
|
| 126 |
+
" <th>class_0</th>\n",
|
| 127 |
+
" <th>class_1</th>\n",
|
| 128 |
+
" <th>class_2</th>\n",
|
| 129 |
+
" <th>class_3</th>\n",
|
| 130 |
+
" <th>class_4</th>\n",
|
| 131 |
+
" <th>class_5</th>\n",
|
| 132 |
+
" <th>class_6</th>\n",
|
| 133 |
+
" <th>class_7</th>\n",
|
| 134 |
+
" <th>class_8</th>\n",
|
| 135 |
+
" <th>class_9</th>\n",
|
| 136 |
+
" </tr>\n",
|
| 137 |
+
" </thead>\n",
|
| 138 |
+
" <tbody>\n",
|
| 139 |
+
" <tr>\n",
|
| 140 |
+
" <th>0</th>\n",
|
| 141 |
+
" <td>2</td>\n",
|
| 142 |
+
" <td>0.004856</td>\n",
|
| 143 |
+
" <td>0.009864</td>\n",
|
| 144 |
+
" <td>0.864422</td>\n",
|
| 145 |
+
" <td>0.008940</td>\n",
|
| 146 |
+
" <td>0.009183</td>\n",
|
| 147 |
+
" <td>0.016639</td>\n",
|
| 148 |
+
" <td>0.039277</td>\n",
|
| 149 |
+
" <td>0.026571</td>\n",
|
| 150 |
+
" <td>0.010824</td>\n",
|
| 151 |
+
" <td>0.009424</td>\n",
|
| 152 |
+
" </tr>\n",
|
| 153 |
+
" <tr>\n",
|
| 154 |
+
" <th>1</th>\n",
|
| 155 |
+
" <td>2</td>\n",
|
| 156 |
+
" <td>0.005297</td>\n",
|
| 157 |
+
" <td>0.006558</td>\n",
|
| 158 |
+
" <td>0.913401</td>\n",
|
| 159 |
+
" <td>0.006262</td>\n",
|
| 160 |
+
" <td>0.011597</td>\n",
|
| 161 |
+
" <td>0.018983</td>\n",
|
| 162 |
+
" <td>0.018909</td>\n",
|
| 163 |
+
" <td>0.006744</td>\n",
|
| 164 |
+
" <td>0.009302</td>\n",
|
| 165 |
+
" <td>0.002946</td>\n",
|
| 166 |
+
" </tr>\n",
|
| 167 |
+
" <tr>\n",
|
| 168 |
+
" <th>2</th>\n",
|
| 169 |
+
" <td>2</td>\n",
|
| 170 |
+
" <td>0.030078</td>\n",
|
| 171 |
+
" <td>0.035363</td>\n",
|
| 172 |
+
" <td>0.681136</td>\n",
|
| 173 |
+
" <td>0.038987</td>\n",
|
| 174 |
+
" <td>0.025477</td>\n",
|
| 175 |
+
" <td>0.060935</td>\n",
|
| 176 |
+
" <td>0.024796</td>\n",
|
| 177 |
+
" <td>0.033198</td>\n",
|
| 178 |
+
" <td>0.045627</td>\n",
|
| 179 |
+
" <td>0.024402</td>\n",
|
| 180 |
+
" </tr>\n",
|
| 181 |
+
" <tr>\n",
|
| 182 |
+
" <th>3</th>\n",
|
| 183 |
+
" <td>2</td>\n",
|
| 184 |
+
" <td>0.014440</td>\n",
|
| 185 |
+
" <td>0.015472</td>\n",
|
| 186 |
+
" <td>0.790973</td>\n",
|
| 187 |
+
" <td>0.023952</td>\n",
|
| 188 |
+
" <td>0.023529</td>\n",
|
| 189 |
+
" <td>0.020345</td>\n",
|
| 190 |
+
" <td>0.037677</td>\n",
|
| 191 |
+
" <td>0.027091</td>\n",
|
| 192 |
+
" <td>0.030567</td>\n",
|
| 193 |
+
" <td>0.015954</td>\n",
|
| 194 |
+
" </tr>\n",
|
| 195 |
+
" <tr>\n",
|
| 196 |
+
" <th>4</th>\n",
|
| 197 |
+
" <td>2</td>\n",
|
| 198 |
+
" <td>0.012646</td>\n",
|
| 199 |
+
" <td>0.019750</td>\n",
|
| 200 |
+
" <td>0.612334</td>\n",
|
| 201 |
+
" <td>0.116218</td>\n",
|
| 202 |
+
" <td>0.036532</td>\n",
|
| 203 |
+
" <td>0.027983</td>\n",
|
| 204 |
+
" <td>0.045918</td>\n",
|
| 205 |
+
" <td>0.060600</td>\n",
|
| 206 |
+
" <td>0.036102</td>\n",
|
| 207 |
+
" <td>0.031918</td>\n",
|
| 208 |
+
" </tr>\n",
|
| 209 |
+
" </tbody>\n",
|
| 210 |
+
"</table>\n",
|
| 211 |
+
"</div>"
|
| 212 |
+
],
|
| 213 |
+
"text/plain": [
|
| 214 |
+
" y_true class_0 class_1 class_2 class_3 class_4 class_5 \\\n",
|
| 215 |
+
"0 2 0.004856 0.009864 0.864422 0.008940 0.009183 0.016639 \n",
|
| 216 |
+
"1 2 0.005297 0.006558 0.913401 0.006262 0.011597 0.018983 \n",
|
| 217 |
+
"2 2 0.030078 0.035363 0.681136 0.038987 0.025477 0.060935 \n",
|
| 218 |
+
"3 2 0.014440 0.015472 0.790973 0.023952 0.023529 0.020345 \n",
|
| 219 |
+
"4 2 0.012646 0.019750 0.612334 0.116218 0.036532 0.027983 \n",
|
| 220 |
+
"\n",
|
| 221 |
+
" class_6 class_7 class_8 class_9 \n",
|
| 222 |
+
"0 0.039277 0.026571 0.010824 0.009424 \n",
|
| 223 |
+
"1 0.018909 0.006744 0.009302 0.002946 \n",
|
| 224 |
+
"2 0.024796 0.033198 0.045627 0.024402 \n",
|
| 225 |
+
"3 0.037677 0.027091 0.030567 0.015954 \n",
|
| 226 |
+
"4 0.045918 0.060600 0.036102 0.031918 "
|
| 227 |
+
]
|
| 228 |
+
},
|
| 229 |
+
"execution_count": 31,
|
| 230 |
+
"metadata": {},
|
| 231 |
+
"output_type": "execute_result"
|
| 232 |
+
}
|
| 233 |
+
],
|
| 234 |
+
"source": [
|
| 235 |
+
"import pandas as pd\n",
|
| 236 |
+
"from sklearn.model_selection import train_test_split\n",
|
| 237 |
+
"from sklearn.linear_model import LogisticRegression\n",
|
| 238 |
+
"from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
|
| 239 |
+
"\n",
|
| 240 |
+
"# Assuming df is already loaded\n",
|
| 241 |
+
"# And vectorise_text and label_encoding are already available\n",
|
| 242 |
+
"\n",
|
| 243 |
+
"# Step 1: Preprocess\n",
|
| 244 |
+
"# X = vectorise_text(df['text'])\n",
|
| 245 |
+
"# y = label_encoding(df['label'])\n",
|
| 246 |
+
"\n",
|
| 247 |
+
"# Step 2: Train-test split\n",
|
| 248 |
+
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
|
| 249 |
+
"\n",
|
| 250 |
+
"# Step 3: Train Logistic Regression\n",
|
| 251 |
+
"model = LogisticRegression(max_iter=1000)\n",
|
| 252 |
+
"model.fit(X_train, y_train)\n",
|
| 253 |
+
"\n",
|
| 254 |
+
"# Step 4: Predict on the test set\n",
|
| 255 |
+
"y_preds = model.predict(X_test)\n",
|
| 256 |
+
"\n",
|
| 257 |
+
"# Step 5: Evaluate\n",
|
| 258 |
+
"acc = accuracy_score(y_test, y_preds)\n",
|
| 259 |
+
"print(f\"✅ Accuracy on Test Set: {acc:.4f}\\n\")\n",
|
| 260 |
+
"\n",
|
| 261 |
+
"print(\"✅ Classification Report on Test Set:\\n\")\n",
|
| 262 |
+
"print(classification_report(y_test, y_preds))\n",
|
| 263 |
+
"\n",
|
| 264 |
+
"print(\"✅ Confusion Matrix on Test Set:\\n\")\n",
|
| 265 |
+
"print(confusion_matrix(y_test, y_preds))\n",
|
| 266 |
+
"\n",
|
| 267 |
+
"# ----------------------------\n",
|
| 268 |
+
"# Step 6: Predict probabilities for FULL fX\n",
|
| 269 |
+
"# ----------------------------\n",
|
| 270 |
+
"\n",
|
| 271 |
+
"# Assuming fX and fy are available (full dataset for probability generation)\n",
|
| 272 |
+
"# fX = vectorise_text(df_full['text'])\n",
|
| 273 |
+
"# fy = label_encoding(df_full['label'])\n",
|
| 274 |
+
"\n",
|
| 275 |
+
"y_proba = model.predict_proba(X)\n",
|
| 276 |
+
"\n",
|
| 277 |
+
"# Step 7: Save probabilities\n",
|
| 278 |
+
"proba_df = pd.DataFrame(y_proba, columns=[f'class_{i}' for i in range(y_proba.shape[1])])\n",
|
| 279 |
+
"proba_df.insert(0, 'y_true', y)\n",
|
| 280 |
+
"\n",
|
| 281 |
+
"proba_df.to_csv('Phase1/TFIDF.csv', index=False)\n",
|
| 282 |
+
"\n",
|
| 283 |
+
"print(\"✅ Saved probabilities to Phase1/TFIDF.csv successfully!\")\n",
|
| 284 |
+
"proba_df.head()\n"
|
| 285 |
+
]
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"cell_type": "code",
|
| 289 |
+
"execution_count": null,
|
| 290 |
+
"id": "4bdabea9",
|
| 291 |
+
"metadata": {},
|
| 292 |
+
"outputs": [
|
| 293 |
+
{
|
| 294 |
+
"data": {
|
| 295 |
+
"text/plain": [
|
| 296 |
+
"35251"
|
| 297 |
+
]
|
| 298 |
+
},
|
| 299 |
+
"execution_count": 10,
|
| 300 |
+
"metadata": {},
|
| 301 |
+
"output_type": "execute_result"
|
| 302 |
+
}
|
| 303 |
+
],
|
| 304 |
+
"source": []
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"cell_type": "code",
|
| 308 |
+
"execution_count": 32,
|
| 309 |
+
"id": "7114ae44",
|
| 310 |
+
"metadata": {},
|
| 311 |
+
"outputs": [],
|
| 312 |
+
"source": [
|
| 313 |
+
"import gensim\n",
|
| 314 |
+
"import numpy as np\n",
|
| 315 |
+
"from sklearn.ensemble import RandomForestClassifier\n",
|
| 316 |
+
"\n",
|
| 317 |
+
"df['tokens'] = df['Input'].apply(lambda x: x.split())\n",
|
| 318 |
+
"\n",
|
| 319 |
+
"w2v_model = gensim.models.Word2Vec(sentences=df['tokens'], vector_size=10000, window=5, min_count=2, workers=10)\n",
|
| 320 |
+
"\n",
|
| 321 |
+
"def get_sentence_embedding(tokens):\n",
|
| 322 |
+
" vectors = [w2v_model.wv[word] for word in tokens if word in w2v_model.wv]\n",
|
| 323 |
+
" return np.mean(vectors, axis=0) if vectors else np.zeros(100)\n",
|
| 324 |
+
"\n",
|
| 325 |
+
"X_w2v = np.array(df['Input'].apply(get_sentence_embedding).tolist())\n",
|
| 326 |
+
"y_encoded = label_encoding(df['Tags'])\n"
|
| 327 |
+
]
|
| 328 |
+
},
|
| 329 |
+
{
|
| 330 |
+
"cell_type": "code",
|
| 331 |
+
"execution_count": 33,
|
| 332 |
+
"id": "de12ab7c",
|
| 333 |
+
"metadata": {},
|
| 334 |
+
"outputs": [],
|
| 335 |
+
"source": [
|
| 336 |
+
"X = X_w2v\n",
|
| 337 |
+
"y = y_encoded"
|
| 338 |
+
]
|
| 339 |
+
},
|
| 340 |
+
{
|
| 341 |
+
"cell_type": "code",
|
| 342 |
+
"execution_count": 34,
|
| 343 |
+
"id": "c5777437",
|
| 344 |
+
"metadata": {},
|
| 345 |
+
"outputs": [
|
| 346 |
+
{
|
| 347 |
+
"name": "stderr",
|
| 348 |
+
"output_type": "stream",
|
| 349 |
+
"text": [
|
| 350 |
+
"/home/darth/.pyenv/versions/major02/lib/python3.10/site-packages/xgboost/training.py:183: UserWarning: [20:55:48] WARNING: /workspace/src/learner.cc:738: \n",
|
| 351 |
+
"Parameters: { \"use_label_encoder\" } are not used.\n",
|
| 352 |
+
"\n",
|
| 353 |
+
" bst.update(dtrain, iteration=i, fobj=obj)\n"
|
| 354 |
+
]
|
| 355 |
+
},
|
| 356 |
+
{
|
| 357 |
+
"name": "stdout",
|
| 358 |
+
"output_type": "stream",
|
| 359 |
+
"text": [
|
| 360 |
+
"✅ Accuracy on Test Set: 0.3624\n",
|
| 361 |
+
"\n",
|
| 362 |
+
"✅ Classification Report on Test Set:\n",
|
| 363 |
+
"\n",
|
| 364 |
+
" precision recall f1-score support\n",
|
| 365 |
+
"\n",
|
| 366 |
+
" 0 0.36 0.37 0.36 792\n",
|
| 367 |
+
" 1 0.23 0.21 0.22 709\n",
|
| 368 |
+
" 2 0.35 0.31 0.33 729\n",
|
| 369 |
+
" 3 0.36 0.42 0.39 830\n",
|
| 370 |
+
" 4 0.46 0.36 0.40 486\n",
|
| 371 |
+
" 5 0.37 0.33 0.35 680\n",
|
| 372 |
+
" 6 0.46 0.62 0.53 803\n",
|
| 373 |
+
" 7 0.29 0.27 0.28 634\n",
|
| 374 |
+
" 8 0.31 0.24 0.27 704\n",
|
| 375 |
+
" 9 0.40 0.44 0.42 684\n",
|
| 376 |
+
"\n",
|
| 377 |
+
" accuracy 0.36 7051\n",
|
| 378 |
+
" macro avg 0.36 0.36 0.35 7051\n",
|
| 379 |
+
"weighted avg 0.36 0.36 0.36 7051\n",
|
| 380 |
+
"\n",
|
| 381 |
+
"✅ Confusion Matrix on Test Set:\n",
|
| 382 |
+
"\n",
|
| 383 |
+
"[[293 69 53 87 15 55 61 51 49 59]\n",
|
| 384 |
+
" [119 150 63 103 22 43 52 58 54 45]\n",
|
| 385 |
+
" [ 74 80 225 73 20 55 64 53 50 35]\n",
|
| 386 |
+
" [ 62 65 54 349 37 40 70 41 47 65]\n",
|
| 387 |
+
" [ 16 30 28 61 173 21 36 30 31 60]\n",
|
| 388 |
+
" [ 58 46 44 52 17 224 123 47 36 33]\n",
|
| 389 |
+
" [ 26 38 37 53 13 52 495 36 22 31]\n",
|
| 390 |
+
" [ 52 60 44 77 18 45 80 173 32 53]\n",
|
| 391 |
+
" [ 79 62 65 69 36 47 42 52 170 82]\n",
|
| 392 |
+
" [ 39 48 31 56 29 24 46 46 62 303]]\n",
|
| 393 |
+
"\n",
|
| 394 |
+
"✅ Also saved class probabilities to Phase1/Word2Vec_Proba.csv!\n"
|
| 395 |
+
]
|
| 396 |
+
},
|
| 397 |
+
{
|
| 398 |
+
"data": {
|
| 399 |
+
"text/html": [
|
| 400 |
+
"<div>\n",
|
| 401 |
+
"<style scoped>\n",
|
| 402 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 403 |
+
" vertical-align: middle;\n",
|
| 404 |
+
" }\n",
|
| 405 |
+
"\n",
|
| 406 |
+
" .dataframe tbody tr th {\n",
|
| 407 |
+
" vertical-align: top;\n",
|
| 408 |
+
" }\n",
|
| 409 |
+
"\n",
|
| 410 |
+
" .dataframe thead th {\n",
|
| 411 |
+
" text-align: right;\n",
|
| 412 |
+
" }\n",
|
| 413 |
+
"</style>\n",
|
| 414 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 415 |
+
" <thead>\n",
|
| 416 |
+
" <tr style=\"text-align: right;\">\n",
|
| 417 |
+
" <th></th>\n",
|
| 418 |
+
" <th>y_true</th>\n",
|
| 419 |
+
" <th>class_0</th>\n",
|
| 420 |
+
" <th>class_1</th>\n",
|
| 421 |
+
" <th>class_2</th>\n",
|
| 422 |
+
" <th>class_3</th>\n",
|
| 423 |
+
" <th>class_4</th>\n",
|
| 424 |
+
" <th>class_5</th>\n",
|
| 425 |
+
" <th>class_6</th>\n",
|
| 426 |
+
" <th>class_7</th>\n",
|
| 427 |
+
" <th>class_8</th>\n",
|
| 428 |
+
" <th>class_9</th>\n",
|
| 429 |
+
" </tr>\n",
|
| 430 |
+
" </thead>\n",
|
| 431 |
+
" <tbody>\n",
|
| 432 |
+
" <tr>\n",
|
| 433 |
+
" <th>0</th>\n",
|
| 434 |
+
" <td>2</td>\n",
|
| 435 |
+
" <td>0.001849</td>\n",
|
| 436 |
+
" <td>0.025684</td>\n",
|
| 437 |
+
" <td>0.877031</td>\n",
|
| 438 |
+
" <td>0.003476</td>\n",
|
| 439 |
+
" <td>0.035381</td>\n",
|
| 440 |
+
" <td>0.001487</td>\n",
|
| 441 |
+
" <td>0.005956</td>\n",
|
| 442 |
+
" <td>0.028449</td>\n",
|
| 443 |
+
" <td>0.009045</td>\n",
|
| 444 |
+
" <td>0.011643</td>\n",
|
| 445 |
+
" </tr>\n",
|
| 446 |
+
" <tr>\n",
|
| 447 |
+
" <th>1</th>\n",
|
| 448 |
+
" <td>2</td>\n",
|
| 449 |
+
" <td>0.015427</td>\n",
|
| 450 |
+
" <td>0.064957</td>\n",
|
| 451 |
+
" <td>0.766613</td>\n",
|
| 452 |
+
" <td>0.013290</td>\n",
|
| 453 |
+
" <td>0.003748</td>\n",
|
| 454 |
+
" <td>0.015020</td>\n",
|
| 455 |
+
" <td>0.071054</td>\n",
|
| 456 |
+
" <td>0.031261</td>\n",
|
| 457 |
+
" <td>0.015151</td>\n",
|
| 458 |
+
" <td>0.003479</td>\n",
|
| 459 |
+
" </tr>\n",
|
| 460 |
+
" <tr>\n",
|
| 461 |
+
" <th>2</th>\n",
|
| 462 |
+
" <td>2</td>\n",
|
| 463 |
+
" <td>0.002920</td>\n",
|
| 464 |
+
" <td>0.028854</td>\n",
|
| 465 |
+
" <td>0.895788</td>\n",
|
| 466 |
+
" <td>0.003870</td>\n",
|
| 467 |
+
" <td>0.001294</td>\n",
|
| 468 |
+
" <td>0.027167</td>\n",
|
| 469 |
+
" <td>0.016574</td>\n",
|
| 470 |
+
" <td>0.002937</td>\n",
|
| 471 |
+
" <td>0.015733</td>\n",
|
| 472 |
+
" <td>0.004863</td>\n",
|
| 473 |
+
" </tr>\n",
|
| 474 |
+
" <tr>\n",
|
| 475 |
+
" <th>3</th>\n",
|
| 476 |
+
" <td>2</td>\n",
|
| 477 |
+
" <td>0.042182</td>\n",
|
| 478 |
+
" <td>0.080883</td>\n",
|
| 479 |
+
" <td>0.610961</td>\n",
|
| 480 |
+
" <td>0.017962</td>\n",
|
| 481 |
+
" <td>0.013955</td>\n",
|
| 482 |
+
" <td>0.062833</td>\n",
|
| 483 |
+
" <td>0.094489</td>\n",
|
| 484 |
+
" <td>0.048541</td>\n",
|
| 485 |
+
" <td>0.008779</td>\n",
|
| 486 |
+
" <td>0.019415</td>\n",
|
| 487 |
+
" </tr>\n",
|
| 488 |
+
" <tr>\n",
|
| 489 |
+
" <th>4</th>\n",
|
| 490 |
+
" <td>2</td>\n",
|
| 491 |
+
" <td>0.047361</td>\n",
|
| 492 |
+
" <td>0.035454</td>\n",
|
| 493 |
+
" <td>0.254978</td>\n",
|
| 494 |
+
" <td>0.578128</td>\n",
|
| 495 |
+
" <td>0.000258</td>\n",
|
| 496 |
+
" <td>0.002649</td>\n",
|
| 497 |
+
" <td>0.003503</td>\n",
|
| 498 |
+
" <td>0.044763</td>\n",
|
| 499 |
+
" <td>0.030260</td>\n",
|
| 500 |
+
" <td>0.002646</td>\n",
|
| 501 |
+
" </tr>\n",
|
| 502 |
+
" </tbody>\n",
|
| 503 |
+
"</table>\n",
|
| 504 |
+
"</div>"
|
| 505 |
+
],
|
| 506 |
+
"text/plain": [
|
| 507 |
+
" y_true class_0 class_1 class_2 class_3 class_4 class_5 \\\n",
|
| 508 |
+
"0 2 0.001849 0.025684 0.877031 0.003476 0.035381 0.001487 \n",
|
| 509 |
+
"1 2 0.015427 0.064957 0.766613 0.013290 0.003748 0.015020 \n",
|
| 510 |
+
"2 2 0.002920 0.028854 0.895788 0.003870 0.001294 0.027167 \n",
|
| 511 |
+
"3 2 0.042182 0.080883 0.610961 0.017962 0.013955 0.062833 \n",
|
| 512 |
+
"4 2 0.047361 0.035454 0.254978 0.578128 0.000258 0.002649 \n",
|
| 513 |
+
"\n",
|
| 514 |
+
" class_6 class_7 class_8 class_9 \n",
|
| 515 |
+
"0 0.005956 0.028449 0.009045 0.011643 \n",
|
| 516 |
+
"1 0.071054 0.031261 0.015151 0.003479 \n",
|
| 517 |
+
"2 0.016574 0.002937 0.015733 0.004863 \n",
|
| 518 |
+
"3 0.094489 0.048541 0.008779 0.019415 \n",
|
| 519 |
+
"4 0.003503 0.044763 0.030260 0.002646 "
|
| 520 |
+
]
|
| 521 |
+
},
|
| 522 |
+
"execution_count": 34,
|
| 523 |
+
"metadata": {},
|
| 524 |
+
"output_type": "execute_result"
|
| 525 |
+
}
|
| 526 |
+
],
|
| 527 |
+
"source": [
|
| 528 |
+
"import xgboost as xgb\n",
|
| 529 |
+
"from sklearn.model_selection import train_test_split\n",
|
| 530 |
+
"from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
|
| 531 |
+
"import pandas as pd\n",
|
| 532 |
+
"import os\n",
|
| 533 |
+
"\n",
|
| 534 |
+
"# Step 1: Ensure Phase1 folder exists\n",
|
| 535 |
+
"os.makedirs('Phase1', exist_ok=True)\n",
|
| 536 |
+
"\n",
|
| 537 |
+
"# Step 2: Train-test split\n",
|
| 538 |
+
"X_train, X_test, y_train, y_test = train_test_split(X_w2v, y_encoded, test_size=0.2, random_state=42)\n",
|
| 539 |
+
"\n",
|
| 540 |
+
"# Step 3: Train XGBoost model\n",
|
| 541 |
+
"model = xgb.XGBClassifier(use_label_encoder=False, eval_metric='mlogloss')\n",
|
| 542 |
+
"model.fit(X_train, y_train)\n",
|
| 543 |
+
"\n",
|
| 544 |
+
"# Step 4: Predict on the test set\n",
|
| 545 |
+
"y_preds = model.predict(X_test)\n",
|
| 546 |
+
"\n",
|
| 547 |
+
"# Step 5: Evaluate\n",
|
| 548 |
+
"acc = accuracy_score(y_test, y_preds)\n",
|
| 549 |
+
"print(f\"✅ Accuracy on Test Set: {acc:.4f}\\n\")\n",
|
| 550 |
+
"\n",
|
| 551 |
+
"print(\"✅ Classification Report on Test Set:\\n\")\n",
|
| 552 |
+
"print(classification_report(y_test, y_preds))\n",
|
| 553 |
+
"\n",
|
| 554 |
+
"print(\"✅ Confusion Matrix on Test Set:\\n\")\n",
|
| 555 |
+
"print(confusion_matrix(y_test, y_preds))\n",
|
| 556 |
+
"\n",
|
| 557 |
+
"# ---------------------------------\n",
|
| 558 |
+
"# Step 6: Predict probabilities on full data\n",
|
| 559 |
+
"# ---------------------------------\n",
|
| 560 |
+
"y_proba = model.predict_proba(X_w2v)\n",
|
| 561 |
+
"\n",
|
| 562 |
+
"proba_df = pd.DataFrame(y_proba, columns=[f'class_{i}' for i in range(y_proba.shape[1])])\n",
|
| 563 |
+
"proba_df.insert(0, 'y_true', y_encoded)\n",
|
| 564 |
+
"\n",
|
| 565 |
+
"proba_df.to_csv('Phase1/Word2Vec_Proba.csv', index=False)\n",
|
| 566 |
+
"\n",
|
| 567 |
+
"print(\"\\n✅ Also saved class probabilities to Phase1/Word2Vec_Proba.csv!\")\n",
|
| 568 |
+
"proba_df.head()\n"
|
| 569 |
+
]
|
| 570 |
+
},
|
| 571 |
+
{
|
| 572 |
+
"cell_type": "code",
|
| 573 |
+
"execution_count": 13,
|
| 574 |
+
"id": "22efac38",
|
| 575 |
+
"metadata": {},
|
| 576 |
+
"outputs": [],
|
| 577 |
+
"source": [
|
| 578 |
+
"proba_df.to_csv('Phase1/Word2Vec.csv', index=False)"
|
| 579 |
+
]
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"cell_type": "code",
|
| 583 |
+
"execution_count": 35,
|
| 584 |
+
"id": "73fff7e2",
|
| 585 |
+
"metadata": {},
|
| 586 |
+
"outputs": [
|
| 587 |
+
{
|
| 588 |
+
"name": "stderr",
|
| 589 |
+
"output_type": "stream",
|
| 590 |
+
"text": [
|
| 591 |
+
"Generating GloVe Embeddings: 100%|██████████| 35251/35251 [00:01<00:00, 22919.42it/s]"
|
| 592 |
+
]
|
| 593 |
+
},
|
| 594 |
+
{
|
| 595 |
+
"name": "stdout",
|
| 596 |
+
"output_type": "stream",
|
| 597 |
+
"text": [
|
| 598 |
+
"(35251, 300)\n"
|
| 599 |
+
]
|
| 600 |
+
},
|
| 601 |
+
{
|
| 602 |
+
"name": "stderr",
|
| 603 |
+
"output_type": "stream",
|
| 604 |
+
"text": [
|
| 605 |
+
"\n"
|
| 606 |
+
]
|
| 607 |
+
}
|
| 608 |
+
],
|
| 609 |
+
"source": [
|
| 610 |
+
"import numpy as np\n",
|
| 611 |
+
"import gensim.downloader as api\n",
|
| 612 |
+
"\n",
|
| 613 |
+
"# Load Pretrained GloVe Model\n",
|
| 614 |
+
"glove_model = api.load(\"glove-wiki-gigaword-300\") \n",
|
| 615 |
+
"\n",
|
| 616 |
+
"# Convert a sentence to a vector by averaging word embeddings\n",
|
| 617 |
+
"def get_sentence_embedding(sentence, model, dim=300):\n",
|
| 618 |
+
" words = sentence.split()\n",
|
| 619 |
+
" word_vectors = [model[word] for word in words if word in model]\n",
|
| 620 |
+
" return np.mean(word_vectors, axis=0) if word_vectors else np.zeros(dim)\n",
|
| 621 |
+
"\n",
|
| 622 |
+
"text_samples = [\"GloVe embeddings capture word semantics\"]\n",
|
| 623 |
+
"embeddings = np.array([get_sentence_embedding(text, glove_model) for text in text_samples])\n",
|
| 624 |
+
"\n",
|
| 625 |
+
"\n",
|
| 626 |
+
"from tqdm import tqdm\n",
|
| 627 |
+
"\n",
|
| 628 |
+
"# Generate GloVe embeddings for the entire dataset with a progress bar\n",
|
| 629 |
+
"df['glove_embeddings'] = df['Input'].apply(lambda x: get_sentence_embedding(x, glove_model))\n",
|
| 630 |
+
"\n",
|
| 631 |
+
"# Convert the list of embeddings to a numpy array\n",
|
| 632 |
+
"X_glove = np.array([get_sentence_embedding(text, glove_model) for text in tqdm(df['Input'], desc=\"Generating GloVe Embeddings\")])\n",
|
| 633 |
+
"\n",
|
| 634 |
+
"# Check the shape of the generated embeddings\n",
|
| 635 |
+
"print(X_glove.shape)"
|
| 636 |
+
]
|
| 637 |
+
},
|
| 638 |
+
{
|
| 639 |
+
"cell_type": "code",
|
| 640 |
+
"execution_count": 36,
|
| 641 |
+
"id": "05754d5c",
|
| 642 |
+
"metadata": {},
|
| 643 |
+
"outputs": [],
|
| 644 |
+
"source": [
|
| 645 |
+
"X = df['glove_embeddings']\n",
|
| 646 |
+
"y = label_encoding(df['Tags']) \n"
|
| 647 |
+
]
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"cell_type": "code",
|
| 651 |
+
"execution_count": 37,
|
| 652 |
+
"id": "3092399e",
|
| 653 |
+
"metadata": {},
|
| 654 |
+
"outputs": [
|
| 655 |
+
{
|
| 656 |
+
"name": "stdout",
|
| 657 |
+
"output_type": "stream",
|
| 658 |
+
"text": [
|
| 659 |
+
"✅ Accuracy on Test Set: 0.7103\n",
|
| 660 |
+
"\n",
|
| 661 |
+
"✅ Classification Report on Test Set:\n",
|
| 662 |
+
"\n",
|
| 663 |
+
" precision recall f1-score support\n",
|
| 664 |
+
"\n",
|
| 665 |
+
" 0 0.66 0.64 0.65 792\n",
|
| 666 |
+
" 1 0.45 0.50 0.47 709\n",
|
| 667 |
+
" 2 0.80 0.79 0.79 729\n",
|
| 668 |
+
" 3 0.70 0.70 0.70 830\n",
|
| 669 |
+
" 4 0.72 0.63 0.67 486\n",
|
| 670 |
+
" 5 0.81 0.83 0.82 680\n",
|
| 671 |
+
" 6 0.91 0.94 0.93 803\n",
|
| 672 |
+
" 7 0.68 0.74 0.70 634\n",
|
| 673 |
+
" 8 0.59 0.53 0.56 704\n",
|
| 674 |
+
" 9 0.78 0.76 0.77 684\n",
|
| 675 |
+
"\n",
|
| 676 |
+
" accuracy 0.71 7051\n",
|
| 677 |
+
" macro avg 0.71 0.71 0.71 7051\n",
|
| 678 |
+
"weighted avg 0.71 0.71 0.71 7051\n",
|
| 679 |
+
"\n",
|
| 680 |
+
"✅ Confusion Matrix on Test Set:\n",
|
| 681 |
+
"\n",
|
| 682 |
+
"[[506 137 14 16 5 24 5 31 42 12]\n",
|
| 683 |
+
" [123 354 16 20 7 31 12 37 88 21]\n",
|
| 684 |
+
" [ 17 22 575 28 22 25 9 14 14 3]\n",
|
| 685 |
+
" [ 19 35 15 581 45 16 5 27 47 40]\n",
|
| 686 |
+
" [ 4 26 24 63 308 10 11 8 18 14]\n",
|
| 687 |
+
" [ 19 29 17 11 5 565 13 7 2 12]\n",
|
| 688 |
+
" [ 1 4 10 5 4 7 758 9 4 1]\n",
|
| 689 |
+
" [ 24 37 19 19 7 5 11 466 24 22]\n",
|
| 690 |
+
" [ 39 108 28 48 12 5 6 59 376 23]\n",
|
| 691 |
+
" [ 9 30 5 43 13 8 3 30 24 519]]\n",
|
| 692 |
+
"\n",
|
| 693 |
+
"✅ Saved probabilities to Phase1/GloVe.csv!\n"
|
| 694 |
+
]
|
| 695 |
+
},
|
| 696 |
+
{
|
| 697 |
+
"data": {
|
| 698 |
+
"text/html": [
|
| 699 |
+
"<div>\n",
|
| 700 |
+
"<style scoped>\n",
|
| 701 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 702 |
+
" vertical-align: middle;\n",
|
| 703 |
+
" }\n",
|
| 704 |
+
"\n",
|
| 705 |
+
" .dataframe tbody tr th {\n",
|
| 706 |
+
" vertical-align: top;\n",
|
| 707 |
+
" }\n",
|
| 708 |
+
"\n",
|
| 709 |
+
" .dataframe thead th {\n",
|
| 710 |
+
" text-align: right;\n",
|
| 711 |
+
" }\n",
|
| 712 |
+
"</style>\n",
|
| 713 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 714 |
+
" <thead>\n",
|
| 715 |
+
" <tr style=\"text-align: right;\">\n",
|
| 716 |
+
" <th></th>\n",
|
| 717 |
+
" <th>y_true</th>\n",
|
| 718 |
+
" <th>class_0</th>\n",
|
| 719 |
+
" <th>class_1</th>\n",
|
| 720 |
+
" <th>class_2</th>\n",
|
| 721 |
+
" <th>class_3</th>\n",
|
| 722 |
+
" <th>class_4</th>\n",
|
| 723 |
+
" <th>class_5</th>\n",
|
| 724 |
+
" <th>class_6</th>\n",
|
| 725 |
+
" <th>class_7</th>\n",
|
| 726 |
+
" <th>class_8</th>\n",
|
| 727 |
+
" <th>class_9</th>\n",
|
| 728 |
+
" </tr>\n",
|
| 729 |
+
" </thead>\n",
|
| 730 |
+
" <tbody>\n",
|
| 731 |
+
" <tr>\n",
|
| 732 |
+
" <th>0</th>\n",
|
| 733 |
+
" <td>2</td>\n",
|
| 734 |
+
" <td>3.351565e-08</td>\n",
|
| 735 |
+
" <td>3.210468e-08</td>\n",
|
| 736 |
+
" <td>0.999735</td>\n",
|
| 737 |
+
" <td>0.000002</td>\n",
|
| 738 |
+
" <td>0.000007</td>\n",
|
| 739 |
+
" <td>0.000002</td>\n",
|
| 740 |
+
" <td>0.000029</td>\n",
|
| 741 |
+
" <td>0.000017</td>\n",
|
| 742 |
+
" <td>0.000204</td>\n",
|
| 743 |
+
" <td>0.000004</td>\n",
|
| 744 |
+
" </tr>\n",
|
| 745 |
+
" <tr>\n",
|
| 746 |
+
" <th>1</th>\n",
|
| 747 |
+
" <td>2</td>\n",
|
| 748 |
+
" <td>5.428256e-04</td>\n",
|
| 749 |
+
" <td>5.070415e-05</td>\n",
|
| 750 |
+
" <td>0.998922</td>\n",
|
| 751 |
+
" <td>0.000006</td>\n",
|
| 752 |
+
" <td>0.000039</td>\n",
|
| 753 |
+
" <td>0.000233</td>\n",
|
| 754 |
+
" <td>0.000101</td>\n",
|
| 755 |
+
" <td>0.000028</td>\n",
|
| 756 |
+
" <td>0.000066</td>\n",
|
| 757 |
+
" <td>0.000011</td>\n",
|
| 758 |
+
" </tr>\n",
|
| 759 |
+
" <tr>\n",
|
| 760 |
+
" <th>2</th>\n",
|
| 761 |
+
" <td>2</td>\n",
|
| 762 |
+
" <td>1.161756e-03</td>\n",
|
| 763 |
+
" <td>1.901476e-03</td>\n",
|
| 764 |
+
" <td>0.992795</td>\n",
|
| 765 |
+
" <td>0.000341</td>\n",
|
| 766 |
+
" <td>0.000429</td>\n",
|
| 767 |
+
" <td>0.000718</td>\n",
|
| 768 |
+
" <td>0.000008</td>\n",
|
| 769 |
+
" <td>0.000079</td>\n",
|
| 770 |
+
" <td>0.000842</td>\n",
|
| 771 |
+
" <td>0.001723</td>\n",
|
| 772 |
+
" </tr>\n",
|
| 773 |
+
" <tr>\n",
|
| 774 |
+
" <th>3</th>\n",
|
| 775 |
+
" <td>2</td>\n",
|
| 776 |
+
" <td>5.475805e-02</td>\n",
|
| 777 |
+
" <td>1.380654e-01</td>\n",
|
| 778 |
+
" <td>0.112860</td>\n",
|
| 779 |
+
" <td>0.074255</td>\n",
|
| 780 |
+
" <td>0.013319</td>\n",
|
| 781 |
+
" <td>0.010117</td>\n",
|
| 782 |
+
" <td>0.116254</td>\n",
|
| 783 |
+
" <td>0.105238</td>\n",
|
| 784 |
+
" <td>0.271900</td>\n",
|
| 785 |
+
" <td>0.103235</td>\n",
|
| 786 |
+
" </tr>\n",
|
| 787 |
+
" <tr>\n",
|
| 788 |
+
" <th>4</th>\n",
|
| 789 |
+
" <td>2</td>\n",
|
| 790 |
+
" <td>1.400376e-02</td>\n",
|
| 791 |
+
" <td>7.604539e-02</td>\n",
|
| 792 |
+
" <td>0.115192</td>\n",
|
| 793 |
+
" <td>0.251055</td>\n",
|
| 794 |
+
" <td>0.087261</td>\n",
|
| 795 |
+
" <td>0.082912</td>\n",
|
| 796 |
+
" <td>0.030533</td>\n",
|
| 797 |
+
" <td>0.185033</td>\n",
|
| 798 |
+
" <td>0.074275</td>\n",
|
| 799 |
+
" <td>0.083690</td>\n",
|
| 800 |
+
" </tr>\n",
|
| 801 |
+
" </tbody>\n",
|
| 802 |
+
"</table>\n",
|
| 803 |
+
"</div>"
|
| 804 |
+
],
|
| 805 |
+
"text/plain": [
|
| 806 |
+
" y_true class_0 class_1 class_2 class_3 class_4 class_5 \\\n",
|
| 807 |
+
"0 2 3.351565e-08 3.210468e-08 0.999735 0.000002 0.000007 0.000002 \n",
|
| 808 |
+
"1 2 5.428256e-04 5.070415e-05 0.998922 0.000006 0.000039 0.000233 \n",
|
| 809 |
+
"2 2 1.161756e-03 1.901476e-03 0.992795 0.000341 0.000429 0.000718 \n",
|
| 810 |
+
"3 2 5.475805e-02 1.380654e-01 0.112860 0.074255 0.013319 0.010117 \n",
|
| 811 |
+
"4 2 1.400376e-02 7.604539e-02 0.115192 0.251055 0.087261 0.082912 \n",
|
| 812 |
+
"\n",
|
| 813 |
+
" class_6 class_7 class_8 class_9 \n",
|
| 814 |
+
"0 0.000029 0.000017 0.000204 0.000004 \n",
|
| 815 |
+
"1 0.000101 0.000028 0.000066 0.000011 \n",
|
| 816 |
+
"2 0.000008 0.000079 0.000842 0.001723 \n",
|
| 817 |
+
"3 0.116254 0.105238 0.271900 0.103235 \n",
|
| 818 |
+
"4 0.030533 0.185033 0.074275 0.083690 "
|
| 819 |
+
]
|
| 820 |
+
},
|
| 821 |
+
"execution_count": 37,
|
| 822 |
+
"metadata": {},
|
| 823 |
+
"output_type": "execute_result"
|
| 824 |
+
}
|
| 825 |
+
],
|
| 826 |
+
"source": [
|
| 827 |
+
"from sklearn.svm import SVC\n",
|
| 828 |
+
"from sklearn.model_selection import train_test_split\n",
|
| 829 |
+
"from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
|
| 830 |
+
"import pandas as pd\n",
|
| 831 |
+
"import os\n",
|
| 832 |
+
"import numpy as np\n",
|
| 833 |
+
"\n",
|
| 834 |
+
"# Step 0: Ensure Phase1 folder exists\n",
|
| 835 |
+
"os.makedirs('Phase1', exist_ok=True)\n",
|
| 836 |
+
"\n",
|
| 837 |
+
"# Step 1: Fix X if needed\n",
|
| 838 |
+
"if isinstance(X.iloc[0], np.ndarray):\n",
|
| 839 |
+
" X = np.vstack(X.values)\n",
|
| 840 |
+
"\n",
|
| 841 |
+
"# Step 2: Train-test split\n",
|
| 842 |
+
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
|
| 843 |
+
"\n",
|
| 844 |
+
"# Step 3: Train SVM model\n",
|
| 845 |
+
"model = SVC(kernel='linear', probability=True)\n",
|
| 846 |
+
"model.fit(X_train, y_train)\n",
|
| 847 |
+
"\n",
|
| 848 |
+
"# Step 4: Predict on the test set\n",
|
| 849 |
+
"y_preds = model.predict(X_test)\n",
|
| 850 |
+
"\n",
|
| 851 |
+
"# Step 5: Evaluate\n",
|
| 852 |
+
"acc = accuracy_score(y_test, y_preds)\n",
|
| 853 |
+
"print(f\"✅ Accuracy on Test Set: {acc:.4f}\\n\")\n",
|
| 854 |
+
"\n",
|
| 855 |
+
"print(\"✅ Classification Report on Test Set:\\n\")\n",
|
| 856 |
+
"print(classification_report(y_test, y_preds))\n",
|
| 857 |
+
"\n",
|
| 858 |
+
"print(\"✅ Confusion Matrix on Test Set:\\n\")\n",
|
| 859 |
+
"print(confusion_matrix(y_test, y_preds))\n",
|
| 860 |
+
"\n",
|
| 861 |
+
"# ---------------------------------\n",
|
| 862 |
+
"# Step 6: Predict probabilities on full data\n",
|
| 863 |
+
"# ---------------------------------\n",
|
| 864 |
+
"y_proba = model.predict_proba(X)\n",
|
| 865 |
+
"\n",
|
| 866 |
+
"# Step 7: Save probability scores\n",
|
| 867 |
+
"proba_df = pd.DataFrame(y_proba, columns=[f'class_{i}' for i in range(y_proba.shape[1])])\n",
|
| 868 |
+
"proba_df.insert(0, 'y_true', y)\n",
|
| 869 |
+
"\n",
|
| 870 |
+
"proba_df.to_csv('Phase1/GloVe.csv', index=False)\n",
|
| 871 |
+
"\n",
|
| 872 |
+
"print(\"\\n✅ Saved probabilities to Phase1/GloVe.csv!\")\n",
|
| 873 |
+
"proba_df.head()\n"
|
| 874 |
+
]
|
| 875 |
+
},
|
| 876 |
+
{
|
| 877 |
+
"cell_type": "code",
|
| 878 |
+
"execution_count": 25,
|
| 879 |
+
"id": "046eeb56",
|
| 880 |
+
"metadata": {},
|
| 881 |
+
"outputs": [
|
| 882 |
+
{
|
| 883 |
+
"name": "stdout",
|
| 884 |
+
"output_type": "stream",
|
| 885 |
+
"text": [
|
| 886 |
+
"Collecting transformers\n",
|
| 887 |
+
" Using cached transformers-4.51.3-py3-none-any.whl (10.4 MB)\n",
|
| 888 |
+
"Requirement already satisfied: filelock in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (3.18.0)\n",
|
| 889 |
+
"Collecting requests\n",
|
| 890 |
+
" Using cached requests-2.32.3-py3-none-any.whl (64 kB)\n",
|
| 891 |
+
"Collecting pyyaml>=5.1\n",
|
| 892 |
+
" Using cached PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (751 kB)\n",
|
| 893 |
+
"Collecting huggingface-hub<1.0,>=0.30.0\n",
|
| 894 |
+
" Using cached huggingface_hub-0.30.2-py3-none-any.whl (481 kB)\n",
|
| 895 |
+
"Collecting tokenizers<0.22,>=0.21\n",
|
| 896 |
+
" Using cached tokenizers-0.21.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.0 MB)\n",
|
| 897 |
+
"Collecting safetensors>=0.4.3\n",
|
| 898 |
+
" Using cached safetensors-0.5.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (471 kB)\n",
|
| 899 |
+
"Requirement already satisfied: numpy>=1.17 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (1.26.4)\n",
|
| 900 |
+
"Requirement already satisfied: tqdm>=4.27 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (4.67.1)\n",
|
| 901 |
+
"Requirement already satisfied: packaging>=20.0 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (25.0)\n",
|
| 902 |
+
"Requirement already satisfied: regex!=2019.12.17 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (2024.11.6)\n",
|
| 903 |
+
"Requirement already satisfied: fsspec>=2023.5.0 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.30.0->transformers) (2025.3.2)\n",
|
| 904 |
+
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.30.0->transformers) (4.13.2)\n",
|
| 905 |
+
"Collecting idna<4,>=2.5\n",
|
| 906 |
+
" Using cached idna-3.10-py3-none-any.whl (70 kB)\n",
|
| 907 |
+
"Collecting certifi>=2017.4.17\n",
|
| 908 |
+
" Using cached certifi-2025.4.26-py3-none-any.whl (159 kB)\n",
|
| 909 |
+
"Collecting urllib3<3,>=1.21.1\n",
|
| 910 |
+
" Using cached urllib3-2.4.0-py3-none-any.whl (128 kB)\n",
|
| 911 |
+
"Collecting charset-normalizer<4,>=2\n",
|
| 912 |
+
" Using cached charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (146 kB)\n",
|
| 913 |
+
"Installing collected packages: urllib3, safetensors, pyyaml, idna, charset-normalizer, certifi, requests, huggingface-hub, tokenizers, transformers\n",
|
| 914 |
+
"Successfully installed certifi-2025.4.26 charset-normalizer-3.4.1 huggingface-hub-0.30.2 idna-3.10 pyyaml-6.0.2 requests-2.32.3 safetensors-0.5.3 tokenizers-0.21.1 transformers-4.51.3 urllib3-2.4.0\n",
|
| 915 |
+
"\n",
|
| 916 |
+
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m25.1\u001b[0m\n",
|
| 917 |
+
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
| 918 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
| 919 |
+
]
|
| 920 |
+
}
|
| 921 |
+
],
|
| 922 |
+
"source": [
|
| 923 |
+
"%pip install transformers"
|
| 924 |
+
]
|
| 925 |
+
},
|
| 926 |
+
{
|
| 927 |
+
"cell_type": "code",
|
| 928 |
+
"execution_count": null,
|
| 929 |
+
"id": "76540636",
|
| 930 |
+
"metadata": {},
|
| 931 |
+
"outputs": [
|
| 932 |
+
{
|
| 933 |
+
"ename": "NameError",
|
| 934 |
+
"evalue": "name 'label_encoding' is not defined",
|
| 935 |
+
"output_type": "error",
|
| 936 |
+
"traceback": [
|
| 937 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
| 938 |
+
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
|
| 939 |
+
"Cell \u001b[0;32mIn[5], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m X \u001b[38;5;241m=\u001b[39m embeddings\n\u001b[0;32m----> 2\u001b[0m y \u001b[38;5;241m=\u001b[39m \u001b[43mlabel_encoding\u001b[49m(df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTags\u001b[39m\u001b[38;5;124m'\u001b[39m])\n",
|
| 940 |
+
"\u001b[0;31mNameError\u001b[0m: name 'label_encoding' is not defined"
|
| 941 |
+
]
|
| 942 |
+
}
|
| 943 |
+
],
|
| 944 |
+
"source": []
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"cell_type": "code",
|
| 948 |
+
"execution_count": 38,
|
| 949 |
+
"id": "335daa95",
|
| 950 |
+
"metadata": {},
|
| 951 |
+
"outputs": [
|
| 952 |
+
{
|
| 953 |
+
"name": "stderr",
|
| 954 |
+
"output_type": "stream",
|
| 955 |
+
"text": [
|
| 956 |
+
"Processing: 100%|██████████| 2204/2204 [02:07<00:00, 17.25it/s]\n"
|
| 957 |
+
]
|
| 958 |
+
}
|
| 959 |
+
],
|
| 960 |
+
"source": [
|
| 961 |
+
"import torch\n",
|
| 962 |
+
"import pandas as pd\n",
|
| 963 |
+
"import numpy as np\n",
|
| 964 |
+
"from tqdm import tqdm\n",
|
| 965 |
+
"from transformers import AlbertTokenizer, AlbertModel\n",
|
| 966 |
+
"\n",
|
| 967 |
+
"# Load ALBERT tokenizer and model\n",
|
| 968 |
+
"tokenizer = AlbertTokenizer.from_pretrained(\"albert-base-v2\")\n",
|
| 969 |
+
"model = AlbertModel.from_pretrained(\"albert-base-v2\")\n",
|
| 970 |
+
"\n",
|
| 971 |
+
"# Set device (GPU if available, otherwise CPU)\n",
|
| 972 |
+
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
|
| 973 |
+
"model.to(device)\n",
|
| 974 |
+
"\n",
|
| 975 |
+
"# Load your dataset (Ensure df['text'] contains the text data)\n",
|
| 976 |
+
"# df = pd.read_csv(\"your_dataset.csv\") # Replace with your dataset file\n",
|
| 977 |
+
"texts = df[\"Input\"].tolist()\n",
|
| 978 |
+
"\n",
|
| 979 |
+
"# Function to generate embeddings\n",
|
| 980 |
+
"def get_albert_embeddings(texts, batch_size=16):\n",
|
| 981 |
+
" model.eval() # Set model to evaluation mode\n",
|
| 982 |
+
" embeddings_list = []\n",
|
| 983 |
+
"\n",
|
| 984 |
+
" for i in tqdm(range(0, len(texts), batch_size), desc=\"Processing\"):\n",
|
| 985 |
+
" batch_texts = texts[i : i + batch_size]\n",
|
| 986 |
+
" \n",
|
| 987 |
+
" # Tokenize the batch\n",
|
| 988 |
+
" inputs = tokenizer(batch_texts, return_tensors=\"pt\", padding=True, truncation=True, max_length=512)\n",
|
| 989 |
+
" inputs = {key: val.to(device) for key, val in inputs.items()} # Move to GPU/CPU\n",
|
| 990 |
+
" \n",
|
| 991 |
+
" # Get embeddings\n",
|
| 992 |
+
" with torch.no_grad():\n",
|
| 993 |
+
" outputs = model(**inputs)\n",
|
| 994 |
+
" \n",
|
| 995 |
+
" # Extract [CLS] token representation (sentence embedding)\n",
|
| 996 |
+
" cls_embeddings = outputs.last_hidden_state[:, 0, :].cpu().numpy()\n",
|
| 997 |
+
" embeddings_list.append(cls_embeddings)\n",
|
| 998 |
+
"\n",
|
| 999 |
+
" return np.vstack(embeddings_list) # Stack all embeddings\n",
|
| 1000 |
+
"\n",
|
| 1001 |
+
"# Generate embeddings for all texts\n",
|
| 1002 |
+
"embeddings = get_albert_embeddings(texts)"
|
| 1003 |
+
]
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"cell_type": "code",
|
| 1007 |
+
"execution_count": 39,
|
| 1008 |
+
"id": "4cca424b",
|
| 1009 |
+
"metadata": {},
|
| 1010 |
+
"outputs": [],
|
| 1011 |
+
"source": [
|
| 1012 |
+
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
|
| 1013 |
+
"from sklearn.preprocessing import LabelEncoder\n",
|
| 1014 |
+
"\n",
|
| 1015 |
+
"def vectorirse_text(text):\n",
|
| 1016 |
+
" \"\"\" Recieves text as input and returns TF-IDF vectors\"\"\"\n",
|
| 1017 |
+
" tfidf = TfidfVectorizer(max_features=500000)\n",
|
| 1018 |
+
" X = tfidf.fit_transform(text)\n",
|
| 1019 |
+
" return X\n",
|
| 1020 |
+
"\n",
|
| 1021 |
+
"def label_encoding(input):\n",
|
| 1022 |
+
" label_encoder = LabelEncoder()\n",
|
| 1023 |
+
" return label_encoder.fit_transform(input)\n",
|
| 1024 |
+
"\n",
|
| 1025 |
+
"X = embeddings\n",
|
| 1026 |
+
"y = label_encoding(df['Tags'])"
|
| 1027 |
+
]
|
| 1028 |
+
},
|
| 1029 |
+
{
|
| 1030 |
+
"cell_type": "code",
|
| 1031 |
+
"execution_count": 40,
|
| 1032 |
+
"id": "c7f18d8e",
|
| 1033 |
+
"metadata": {},
|
| 1034 |
+
"outputs": [
|
| 1035 |
+
{
|
| 1036 |
+
"name": "stderr",
|
| 1037 |
+
"output_type": "stream",
|
| 1038 |
+
"text": [
|
| 1039 |
+
"/home/darth/.pyenv/versions/major02/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py:465: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
|
| 1040 |
+
"STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
|
| 1041 |
+
"\n",
|
| 1042 |
+
"Increase the number of iterations (max_iter) or scale the data as shown in:\n",
|
| 1043 |
+
" https://scikit-learn.org/stable/modules/preprocessing.html\n",
|
| 1044 |
+
"Please also refer to the documentation for alternative solver options:\n",
|
| 1045 |
+
" https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
|
| 1046 |
+
" n_iter_i = _check_optimize_result(\n"
|
| 1047 |
+
]
|
| 1048 |
+
},
|
| 1049 |
+
{
|
| 1050 |
+
"name": "stdout",
|
| 1051 |
+
"output_type": "stream",
|
| 1052 |
+
"text": [
|
| 1053 |
+
"✅ Accuracy on Test Set: 0.7949\n",
|
| 1054 |
+
"\n",
|
| 1055 |
+
"✅ Classification Report on Test Set:\n",
|
| 1056 |
+
"\n",
|
| 1057 |
+
" precision recall f1-score support\n",
|
| 1058 |
+
"\n",
|
| 1059 |
+
" 0 0.76 0.73 0.74 792\n",
|
| 1060 |
+
" 1 0.66 0.66 0.66 709\n",
|
| 1061 |
+
" 2 0.84 0.82 0.83 729\n",
|
| 1062 |
+
" 3 0.79 0.82 0.80 830\n",
|
| 1063 |
+
" 4 0.79 0.76 0.77 486\n",
|
| 1064 |
+
" 5 0.90 0.92 0.91 680\n",
|
| 1065 |
+
" 6 0.98 0.99 0.98 803\n",
|
| 1066 |
+
" 7 0.73 0.75 0.74 634\n",
|
| 1067 |
+
" 8 0.65 0.63 0.64 704\n",
|
| 1068 |
+
" 9 0.82 0.86 0.84 684\n",
|
| 1069 |
+
"\n",
|
| 1070 |
+
" accuracy 0.79 7051\n",
|
| 1071 |
+
" macro avg 0.79 0.79 0.79 7051\n",
|
| 1072 |
+
"weighted avg 0.79 0.79 0.79 7051\n",
|
| 1073 |
+
"\n",
|
| 1074 |
+
"✅ Confusion Matrix on Test Set:\n",
|
| 1075 |
+
"\n",
|
| 1076 |
+
"[[575 95 18 10 3 12 0 33 28 18]\n",
|
| 1077 |
+
" [ 84 467 12 11 5 16 1 28 69 16]\n",
|
| 1078 |
+
" [ 13 9 595 26 26 21 3 12 23 1]\n",
|
| 1079 |
+
" [ 6 15 11 677 39 3 0 15 41 23]\n",
|
| 1080 |
+
" [ 5 10 11 40 370 1 6 8 21 14]\n",
|
| 1081 |
+
" [ 9 12 16 4 4 623 3 3 1 5]\n",
|
| 1082 |
+
" [ 1 1 3 1 1 0 791 4 0 1]\n",
|
| 1083 |
+
" [ 25 27 16 23 6 3 5 478 35 16]\n",
|
| 1084 |
+
" [ 34 56 24 38 13 6 1 58 441 33]\n",
|
| 1085 |
+
" [ 6 17 3 26 3 5 0 13 23 588]]\n",
|
| 1086 |
+
"\n",
|
| 1087 |
+
"✅ Saved class probabilities to Phase1/Albert.csv!\n"
|
| 1088 |
+
]
|
| 1089 |
+
},
|
| 1090 |
+
{
|
| 1091 |
+
"data": {
|
| 1092 |
+
"text/html": [
|
| 1093 |
+
"<div>\n",
|
| 1094 |
+
"<style scoped>\n",
|
| 1095 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 1096 |
+
" vertical-align: middle;\n",
|
| 1097 |
+
" }\n",
|
| 1098 |
+
"\n",
|
| 1099 |
+
" .dataframe tbody tr th {\n",
|
| 1100 |
+
" vertical-align: top;\n",
|
| 1101 |
+
" }\n",
|
| 1102 |
+
"\n",
|
| 1103 |
+
" .dataframe thead th {\n",
|
| 1104 |
+
" text-align: right;\n",
|
| 1105 |
+
" }\n",
|
| 1106 |
+
"</style>\n",
|
| 1107 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 1108 |
+
" <thead>\n",
|
| 1109 |
+
" <tr style=\"text-align: right;\">\n",
|
| 1110 |
+
" <th></th>\n",
|
| 1111 |
+
" <th>y_true</th>\n",
|
| 1112 |
+
" <th>class_0</th>\n",
|
| 1113 |
+
" <th>class_1</th>\n",
|
| 1114 |
+
" <th>class_2</th>\n",
|
| 1115 |
+
" <th>class_3</th>\n",
|
| 1116 |
+
" <th>class_4</th>\n",
|
| 1117 |
+
" <th>class_5</th>\n",
|
| 1118 |
+
" <th>class_6</th>\n",
|
| 1119 |
+
" <th>class_7</th>\n",
|
| 1120 |
+
" <th>class_8</th>\n",
|
| 1121 |
+
" <th>class_9</th>\n",
|
| 1122 |
+
" </tr>\n",
|
| 1123 |
+
" </thead>\n",
|
| 1124 |
+
" <tbody>\n",
|
| 1125 |
+
" <tr>\n",
|
| 1126 |
+
" <th>0</th>\n",
|
| 1127 |
+
" <td>2</td>\n",
|
| 1128 |
+
" <td>4.046189e-06</td>\n",
|
| 1129 |
+
" <td>1.673384e-06</td>\n",
|
| 1130 |
+
" <td>0.996728</td>\n",
|
| 1131 |
+
" <td>8.242731e-04</td>\n",
|
| 1132 |
+
" <td>0.002150</td>\n",
|
| 1133 |
+
" <td>0.000016</td>\n",
|
| 1134 |
+
" <td>2.555716e-07</td>\n",
|
| 1135 |
+
" <td>8.192644e-05</td>\n",
|
| 1136 |
+
" <td>0.000194</td>\n",
|
| 1137 |
+
" <td>8.416093e-09</td>\n",
|
| 1138 |
+
" </tr>\n",
|
| 1139 |
+
" <tr>\n",
|
| 1140 |
+
" <th>1</th>\n",
|
| 1141 |
+
" <td>2</td>\n",
|
| 1142 |
+
" <td>2.162968e-07</td>\n",
|
| 1143 |
+
" <td>8.667423e-07</td>\n",
|
| 1144 |
+
" <td>0.999940</td>\n",
|
| 1145 |
+
" <td>3.937645e-07</td>\n",
|
| 1146 |
+
" <td>0.000004</td>\n",
|
| 1147 |
+
" <td>0.000004</td>\n",
|
| 1148 |
+
" <td>6.603543e-08</td>\n",
|
| 1149 |
+
" <td>2.003625e-07</td>\n",
|
| 1150 |
+
" <td>0.000051</td>\n",
|
| 1151 |
+
" <td>2.772808e-09</td>\n",
|
| 1152 |
+
" </tr>\n",
|
| 1153 |
+
" <tr>\n",
|
| 1154 |
+
" <th>2</th>\n",
|
| 1155 |
+
" <td>2</td>\n",
|
| 1156 |
+
" <td>1.016335e-04</td>\n",
|
| 1157 |
+
" <td>3.334410e-04</td>\n",
|
| 1158 |
+
" <td>0.998280</td>\n",
|
| 1159 |
+
" <td>5.001128e-05</td>\n",
|
| 1160 |
+
" <td>0.000006</td>\n",
|
| 1161 |
+
" <td>0.000730</td>\n",
|
| 1162 |
+
" <td>2.283566e-07</td>\n",
|
| 1163 |
+
" <td>1.428000e-06</td>\n",
|
| 1164 |
+
" <td>0.000497</td>\n",
|
| 1165 |
+
" <td>7.577643e-07</td>\n",
|
| 1166 |
+
" </tr>\n",
|
| 1167 |
+
" <tr>\n",
|
| 1168 |
+
" <th>3</th>\n",
|
| 1169 |
+
" <td>2</td>\n",
|
| 1170 |
+
" <td>1.920308e-03</td>\n",
|
| 1171 |
+
" <td>1.006953e-02</td>\n",
|
| 1172 |
+
" <td>0.955455</td>\n",
|
| 1173 |
+
" <td>2.492423e-03</td>\n",
|
| 1174 |
+
" <td>0.006655</td>\n",
|
| 1175 |
+
" <td>0.000183</td>\n",
|
| 1176 |
+
" <td>3.224371e-04</td>\n",
|
| 1177 |
+
" <td>1.568429e-02</td>\n",
|
| 1178 |
+
" <td>0.006673</td>\n",
|
| 1179 |
+
" <td>5.461269e-04</td>\n",
|
| 1180 |
+
" </tr>\n",
|
| 1181 |
+
" <tr>\n",
|
| 1182 |
+
" <th>4</th>\n",
|
| 1183 |
+
" <td>2</td>\n",
|
| 1184 |
+
" <td>2.279302e-04</td>\n",
|
| 1185 |
+
" <td>3.630354e-05</td>\n",
|
| 1186 |
+
" <td>0.944598</td>\n",
|
| 1187 |
+
" <td>4.439209e-02</td>\n",
|
| 1188 |
+
" <td>0.002457</td>\n",
|
| 1189 |
+
" <td>0.000046</td>\n",
|
| 1190 |
+
" <td>6.339463e-06</td>\n",
|
| 1191 |
+
" <td>7.960528e-03</td>\n",
|
| 1192 |
+
" <td>0.000233</td>\n",
|
| 1193 |
+
" <td>4.337906e-05</td>\n",
|
| 1194 |
+
" </tr>\n",
|
| 1195 |
+
" </tbody>\n",
|
| 1196 |
+
"</table>\n",
|
| 1197 |
+
"</div>"
|
| 1198 |
+
],
|
| 1199 |
+
"text/plain": [
|
| 1200 |
+
" y_true class_0 class_1 class_2 class_3 class_4 \\\n",
|
| 1201 |
+
"0 2 4.046189e-06 1.673384e-06 0.996728 8.242731e-04 0.002150 \n",
|
| 1202 |
+
"1 2 2.162968e-07 8.667423e-07 0.999940 3.937645e-07 0.000004 \n",
|
| 1203 |
+
"2 2 1.016335e-04 3.334410e-04 0.998280 5.001128e-05 0.000006 \n",
|
| 1204 |
+
"3 2 1.920308e-03 1.006953e-02 0.955455 2.492423e-03 0.006655 \n",
|
| 1205 |
+
"4 2 2.279302e-04 3.630354e-05 0.944598 4.439209e-02 0.002457 \n",
|
| 1206 |
+
"\n",
|
| 1207 |
+
" class_5 class_6 class_7 class_8 class_9 \n",
|
| 1208 |
+
"0 0.000016 2.555716e-07 8.192644e-05 0.000194 8.416093e-09 \n",
|
| 1209 |
+
"1 0.000004 6.603543e-08 2.003625e-07 0.000051 2.772808e-09 \n",
|
| 1210 |
+
"2 0.000730 2.283566e-07 1.428000e-06 0.000497 7.577643e-07 \n",
|
| 1211 |
+
"3 0.000183 3.224371e-04 1.568429e-02 0.006673 5.461269e-04 \n",
|
| 1212 |
+
"4 0.000046 6.339463e-06 7.960528e-03 0.000233 4.337906e-05 "
|
| 1213 |
+
]
|
| 1214 |
+
},
|
| 1215 |
+
"execution_count": 40,
|
| 1216 |
+
"metadata": {},
|
| 1217 |
+
"output_type": "execute_result"
|
| 1218 |
+
}
|
| 1219 |
+
],
|
| 1220 |
+
"source": [
|
| 1221 |
+
"from sklearn.linear_model import LogisticRegression\n",
|
| 1222 |
+
"from sklearn.model_selection import train_test_split\n",
|
| 1223 |
+
"from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
|
| 1224 |
+
"import pandas as pd\n",
|
| 1225 |
+
"import os\n",
|
| 1226 |
+
"\n",
|
| 1227 |
+
"# Step 0: Ensure Phase1 folder exists\n",
|
| 1228 |
+
"os.makedirs('Phase1', exist_ok=True)\n",
|
| 1229 |
+
"\n",
|
| 1230 |
+
"# Step 1: Train-test split\n",
|
| 1231 |
+
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
|
| 1232 |
+
"\n",
|
| 1233 |
+
"# Step 2: Train Logistic Regression on training set\n",
|
| 1234 |
+
"model = LogisticRegression(max_iter=1000)\n",
|
| 1235 |
+
"model.fit(X_train, y_train)\n",
|
| 1236 |
+
"\n",
|
| 1237 |
+
"# Step 3: Predict on test set\n",
|
| 1238 |
+
"y_preds = model.predict(X_test)\n",
|
| 1239 |
+
"\n",
|
| 1240 |
+
"# Step 4: Evaluate\n",
|
| 1241 |
+
"acc = accuracy_score(y_test, y_preds)\n",
|
| 1242 |
+
"print(f\"✅ Accuracy on Test Set: {acc:.4f}\\n\")\n",
|
| 1243 |
+
"\n",
|
| 1244 |
+
"print(\"✅ Classification Report on Test Set:\\n\")\n",
|
| 1245 |
+
"print(classification_report(y_test, y_preds))\n",
|
| 1246 |
+
"\n",
|
| 1247 |
+
"print(\"✅ Confusion Matrix on Test Set:\\n\")\n",
|
| 1248 |
+
"print(confusion_matrix(y_test, y_preds))\n",
|
| 1249 |
+
"\n",
|
| 1250 |
+
"# --------------------------------\n",
|
| 1251 |
+
"# Step 5: Predict probabilities on full dataset\n",
|
| 1252 |
+
"# --------------------------------\n",
|
| 1253 |
+
"y_proba = model.predict_proba(X)\n",
|
| 1254 |
+
"\n",
|
| 1255 |
+
"# Step 6: Save probabilities\n",
|
| 1256 |
+
"proba_df = pd.DataFrame(y_proba, columns=[f'class_{i}' for i in range(y_proba.shape[1])])\n",
|
| 1257 |
+
"proba_df.insert(0, 'y_true', y)\n",
|
| 1258 |
+
"\n",
|
| 1259 |
+
"proba_df.to_csv('Phase1/Albert.csv', index=False)\n",
|
| 1260 |
+
"\n",
|
| 1261 |
+
"print(\"\\n✅ Saved class probabilities to Phase1/Albert.csv!\")\n",
|
| 1262 |
+
"proba_df.head()\n"
|
| 1263 |
+
]
|
| 1264 |
+
},
|
| 1265 |
+
{
|
| 1266 |
+
"cell_type": "markdown",
|
| 1267 |
+
"id": "8b9c1929",
|
| 1268 |
+
"metadata": {},
|
| 1269 |
+
"source": [
|
| 1270 |
+
"## Ensemble"
|
| 1271 |
+
]
|
| 1272 |
+
},
|
| 1273 |
+
{
|
| 1274 |
+
"cell_type": "code",
|
| 1275 |
+
"execution_count": 41,
|
| 1276 |
+
"id": "4e6433ba",
|
| 1277 |
+
"metadata": {},
|
| 1278 |
+
"outputs": [
|
| 1279 |
+
{
|
| 1280 |
+
"data": {
|
| 1281 |
+
"text/html": [
|
| 1282 |
+
"<div>\n",
|
| 1283 |
+
"<style scoped>\n",
|
| 1284 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 1285 |
+
" vertical-align: middle;\n",
|
| 1286 |
+
" }\n",
|
| 1287 |
+
"\n",
|
| 1288 |
+
" .dataframe tbody tr th {\n",
|
| 1289 |
+
" vertical-align: top;\n",
|
| 1290 |
+
" }\n",
|
| 1291 |
+
"\n",
|
| 1292 |
+
" .dataframe thead th {\n",
|
| 1293 |
+
" text-align: right;\n",
|
| 1294 |
+
" }\n",
|
| 1295 |
+
"</style>\n",
|
| 1296 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 1297 |
+
" <thead>\n",
|
| 1298 |
+
" <tr style=\"text-align: right;\">\n",
|
| 1299 |
+
" <th></th>\n",
|
| 1300 |
+
" <th>y_true</th>\n",
|
| 1301 |
+
" <th>class_0</th>\n",
|
| 1302 |
+
" <th>class_1</th>\n",
|
| 1303 |
+
" <th>class_2</th>\n",
|
| 1304 |
+
" <th>class_3</th>\n",
|
| 1305 |
+
" <th>class_4</th>\n",
|
| 1306 |
+
" <th>class_5</th>\n",
|
| 1307 |
+
" <th>class_6</th>\n",
|
| 1308 |
+
" <th>class_7</th>\n",
|
| 1309 |
+
" <th>class_8</th>\n",
|
| 1310 |
+
" <th>...</th>\n",
|
| 1311 |
+
" <th>class_0</th>\n",
|
| 1312 |
+
" <th>class_1</th>\n",
|
| 1313 |
+
" <th>class_2</th>\n",
|
| 1314 |
+
" <th>class_3</th>\n",
|
| 1315 |
+
" <th>class_4</th>\n",
|
| 1316 |
+
" <th>class_5</th>\n",
|
| 1317 |
+
" <th>class_6</th>\n",
|
| 1318 |
+
" <th>class_7</th>\n",
|
| 1319 |
+
" <th>class_8</th>\n",
|
| 1320 |
+
" <th>class_9</th>\n",
|
| 1321 |
+
" </tr>\n",
|
| 1322 |
+
" </thead>\n",
|
| 1323 |
+
" <tbody>\n",
|
| 1324 |
+
" <tr>\n",
|
| 1325 |
+
" <th>0</th>\n",
|
| 1326 |
+
" <td>2</td>\n",
|
| 1327 |
+
" <td>0.004856</td>\n",
|
| 1328 |
+
" <td>0.009864</td>\n",
|
| 1329 |
+
" <td>0.864422</td>\n",
|
| 1330 |
+
" <td>0.008940</td>\n",
|
| 1331 |
+
" <td>0.009183</td>\n",
|
| 1332 |
+
" <td>0.016639</td>\n",
|
| 1333 |
+
" <td>0.039277</td>\n",
|
| 1334 |
+
" <td>0.026571</td>\n",
|
| 1335 |
+
" <td>0.010824</td>\n",
|
| 1336 |
+
" <td>...</td>\n",
|
| 1337 |
+
" <td>4.046189e-06</td>\n",
|
| 1338 |
+
" <td>1.673384e-06</td>\n",
|
| 1339 |
+
" <td>0.996728</td>\n",
|
| 1340 |
+
" <td>8.242731e-04</td>\n",
|
| 1341 |
+
" <td>0.002150</td>\n",
|
| 1342 |
+
" <td>0.000016</td>\n",
|
| 1343 |
+
" <td>2.555716e-07</td>\n",
|
| 1344 |
+
" <td>8.192644e-05</td>\n",
|
| 1345 |
+
" <td>0.000194</td>\n",
|
| 1346 |
+
" <td>8.416093e-09</td>\n",
|
| 1347 |
+
" </tr>\n",
|
| 1348 |
+
" <tr>\n",
|
| 1349 |
+
" <th>1</th>\n",
|
| 1350 |
+
" <td>2</td>\n",
|
| 1351 |
+
" <td>0.005297</td>\n",
|
| 1352 |
+
" <td>0.006558</td>\n",
|
| 1353 |
+
" <td>0.913401</td>\n",
|
| 1354 |
+
" <td>0.006262</td>\n",
|
| 1355 |
+
" <td>0.011597</td>\n",
|
| 1356 |
+
" <td>0.018983</td>\n",
|
| 1357 |
+
" <td>0.018909</td>\n",
|
| 1358 |
+
" <td>0.006744</td>\n",
|
| 1359 |
+
" <td>0.009302</td>\n",
|
| 1360 |
+
" <td>...</td>\n",
|
| 1361 |
+
" <td>2.162968e-07</td>\n",
|
| 1362 |
+
" <td>8.667423e-07</td>\n",
|
| 1363 |
+
" <td>0.999940</td>\n",
|
| 1364 |
+
" <td>3.937645e-07</td>\n",
|
| 1365 |
+
" <td>0.000004</td>\n",
|
| 1366 |
+
" <td>0.000004</td>\n",
|
| 1367 |
+
" <td>6.603543e-08</td>\n",
|
| 1368 |
+
" <td>2.003625e-07</td>\n",
|
| 1369 |
+
" <td>0.000051</td>\n",
|
| 1370 |
+
" <td>2.772808e-09</td>\n",
|
| 1371 |
+
" </tr>\n",
|
| 1372 |
+
" <tr>\n",
|
| 1373 |
+
" <th>2</th>\n",
|
| 1374 |
+
" <td>2</td>\n",
|
| 1375 |
+
" <td>0.030078</td>\n",
|
| 1376 |
+
" <td>0.035363</td>\n",
|
| 1377 |
+
" <td>0.681136</td>\n",
|
| 1378 |
+
" <td>0.038987</td>\n",
|
| 1379 |
+
" <td>0.025477</td>\n",
|
| 1380 |
+
" <td>0.060935</td>\n",
|
| 1381 |
+
" <td>0.024796</td>\n",
|
| 1382 |
+
" <td>0.033198</td>\n",
|
| 1383 |
+
" <td>0.045627</td>\n",
|
| 1384 |
+
" <td>...</td>\n",
|
| 1385 |
+
" <td>1.016335e-04</td>\n",
|
| 1386 |
+
" <td>3.334410e-04</td>\n",
|
| 1387 |
+
" <td>0.998280</td>\n",
|
| 1388 |
+
" <td>5.001128e-05</td>\n",
|
| 1389 |
+
" <td>0.000006</td>\n",
|
| 1390 |
+
" <td>0.000730</td>\n",
|
| 1391 |
+
" <td>2.283566e-07</td>\n",
|
| 1392 |
+
" <td>1.428000e-06</td>\n",
|
| 1393 |
+
" <td>0.000497</td>\n",
|
| 1394 |
+
" <td>7.577643e-07</td>\n",
|
| 1395 |
+
" </tr>\n",
|
| 1396 |
+
" <tr>\n",
|
| 1397 |
+
" <th>3</th>\n",
|
| 1398 |
+
" <td>2</td>\n",
|
| 1399 |
+
" <td>0.014440</td>\n",
|
| 1400 |
+
" <td>0.015472</td>\n",
|
| 1401 |
+
" <td>0.790973</td>\n",
|
| 1402 |
+
" <td>0.023952</td>\n",
|
| 1403 |
+
" <td>0.023529</td>\n",
|
| 1404 |
+
" <td>0.020345</td>\n",
|
| 1405 |
+
" <td>0.037677</td>\n",
|
| 1406 |
+
" <td>0.027091</td>\n",
|
| 1407 |
+
" <td>0.030567</td>\n",
|
| 1408 |
+
" <td>...</td>\n",
|
| 1409 |
+
" <td>1.920308e-03</td>\n",
|
| 1410 |
+
" <td>1.006953e-02</td>\n",
|
| 1411 |
+
" <td>0.955455</td>\n",
|
| 1412 |
+
" <td>2.492423e-03</td>\n",
|
| 1413 |
+
" <td>0.006655</td>\n",
|
| 1414 |
+
" <td>0.000183</td>\n",
|
| 1415 |
+
" <td>3.224371e-04</td>\n",
|
| 1416 |
+
" <td>1.568429e-02</td>\n",
|
| 1417 |
+
" <td>0.006673</td>\n",
|
| 1418 |
+
" <td>5.461269e-04</td>\n",
|
| 1419 |
+
" </tr>\n",
|
| 1420 |
+
" <tr>\n",
|
| 1421 |
+
" <th>4</th>\n",
|
| 1422 |
+
" <td>2</td>\n",
|
| 1423 |
+
" <td>0.012646</td>\n",
|
| 1424 |
+
" <td>0.019750</td>\n",
|
| 1425 |
+
" <td>0.612334</td>\n",
|
| 1426 |
+
" <td>0.116218</td>\n",
|
| 1427 |
+
" <td>0.036532</td>\n",
|
| 1428 |
+
" <td>0.027983</td>\n",
|
| 1429 |
+
" <td>0.045918</td>\n",
|
| 1430 |
+
" <td>0.060600</td>\n",
|
| 1431 |
+
" <td>0.036102</td>\n",
|
| 1432 |
+
" <td>...</td>\n",
|
| 1433 |
+
" <td>2.279302e-04</td>\n",
|
| 1434 |
+
" <td>3.630354e-05</td>\n",
|
| 1435 |
+
" <td>0.944598</td>\n",
|
| 1436 |
+
" <td>4.439209e-02</td>\n",
|
| 1437 |
+
" <td>0.002457</td>\n",
|
| 1438 |
+
" <td>0.000046</td>\n",
|
| 1439 |
+
" <td>6.339463e-06</td>\n",
|
| 1440 |
+
" <td>7.960528e-03</td>\n",
|
| 1441 |
+
" <td>0.000233</td>\n",
|
| 1442 |
+
" <td>4.337906e-05</td>\n",
|
| 1443 |
+
" </tr>\n",
|
| 1444 |
+
" </tbody>\n",
|
| 1445 |
+
"</table>\n",
|
| 1446 |
+
"<p>5 rows × 41 columns</p>\n",
|
| 1447 |
+
"</div>"
|
| 1448 |
+
],
|
| 1449 |
+
"text/plain": [
|
| 1450 |
+
" y_true class_0 class_1 class_2 class_3 class_4 class_5 \\\n",
|
| 1451 |
+
"0 2 0.004856 0.009864 0.864422 0.008940 0.009183 0.016639 \n",
|
| 1452 |
+
"1 2 0.005297 0.006558 0.913401 0.006262 0.011597 0.018983 \n",
|
| 1453 |
+
"2 2 0.030078 0.035363 0.681136 0.038987 0.025477 0.060935 \n",
|
| 1454 |
+
"3 2 0.014440 0.015472 0.790973 0.023952 0.023529 0.020345 \n",
|
| 1455 |
+
"4 2 0.012646 0.019750 0.612334 0.116218 0.036532 0.027983 \n",
|
| 1456 |
+
"\n",
|
| 1457 |
+
" class_6 class_7 class_8 ... class_0 class_1 class_2 \\\n",
|
| 1458 |
+
"0 0.039277 0.026571 0.010824 ... 4.046189e-06 1.673384e-06 0.996728 \n",
|
| 1459 |
+
"1 0.018909 0.006744 0.009302 ... 2.162968e-07 8.667423e-07 0.999940 \n",
|
| 1460 |
+
"2 0.024796 0.033198 0.045627 ... 1.016335e-04 3.334410e-04 0.998280 \n",
|
| 1461 |
+
"3 0.037677 0.027091 0.030567 ... 1.920308e-03 1.006953e-02 0.955455 \n",
|
| 1462 |
+
"4 0.045918 0.060600 0.036102 ... 2.279302e-04 3.630354e-05 0.944598 \n",
|
| 1463 |
+
"\n",
|
| 1464 |
+
" class_3 class_4 class_5 class_6 class_7 class_8 \\\n",
|
| 1465 |
+
"0 8.242731e-04 0.002150 0.000016 2.555716e-07 8.192644e-05 0.000194 \n",
|
| 1466 |
+
"1 3.937645e-07 0.000004 0.000004 6.603543e-08 2.003625e-07 0.000051 \n",
|
| 1467 |
+
"2 5.001128e-05 0.000006 0.000730 2.283566e-07 1.428000e-06 0.000497 \n",
|
| 1468 |
+
"3 2.492423e-03 0.006655 0.000183 3.224371e-04 1.568429e-02 0.006673 \n",
|
| 1469 |
+
"4 4.439209e-02 0.002457 0.000046 6.339463e-06 7.960528e-03 0.000233 \n",
|
| 1470 |
+
"\n",
|
| 1471 |
+
" class_9 \n",
|
| 1472 |
+
"0 8.416093e-09 \n",
|
| 1473 |
+
"1 2.772808e-09 \n",
|
| 1474 |
+
"2 7.577643e-07 \n",
|
| 1475 |
+
"3 5.461269e-04 \n",
|
| 1476 |
+
"4 4.337906e-05 \n",
|
| 1477 |
+
"\n",
|
| 1478 |
+
"[5 rows x 41 columns]"
|
| 1479 |
+
]
|
| 1480 |
+
},
|
| 1481 |
+
"execution_count": 41,
|
| 1482 |
+
"metadata": {},
|
| 1483 |
+
"output_type": "execute_result"
|
| 1484 |
+
}
|
| 1485 |
+
],
|
| 1486 |
+
"source": [
|
| 1487 |
+
"import pandas as pd\n",
|
| 1488 |
+
"\n",
|
| 1489 |
+
"# Read the CSV files\n",
|
| 1490 |
+
"tfidf_df = pd.read_csv('Phase1/TFIDF.csv')\n",
|
| 1491 |
+
"word2vec_df = pd.read_csv('Phase1/Word2Vec.csv')\n",
|
| 1492 |
+
"glove_df = pd.read_csv('Phase1/GloVe.csv')\n",
|
| 1493 |
+
"albert_df = pd.read_csv('Phase1/Albert.csv')\n",
|
| 1494 |
+
"\n",
|
| 1495 |
+
"# Concatenate the DataFrames along the columns (axis=1)\n",
|
| 1496 |
+
"combined_df = pd.concat([tfidf_df, word2vec_df.drop([\"y_true\"], axis=1), glove_df.drop([\"y_true\"], axis=1), albert_df.drop([\"y_true\"], axis=1)], axis=1)\n",
|
| 1497 |
+
"\n",
|
| 1498 |
+
"# Optionally, save the combined DataFrame to a new CSV file\n",
|
| 1499 |
+
"# combined_df.to_csv('Phase1/Combined.csv', index=False)\n",
|
| 1500 |
+
"\n",
|
| 1501 |
+
"# Display the combined DataFrame\n",
|
| 1502 |
+
"combined_df.head()\n"
|
| 1503 |
+
]
|
| 1504 |
+
},
|
| 1505 |
+
{
|
| 1506 |
+
"cell_type": "code",
|
| 1507 |
+
"execution_count": 42,
|
| 1508 |
+
"id": "cfd48211",
|
| 1509 |
+
"metadata": {},
|
| 1510 |
+
"outputs": [],
|
| 1511 |
+
"source": [
|
| 1512 |
+
"combined_df.to_csv('Phase1/Combined.csv', index=False)"
|
| 1513 |
+
]
|
| 1514 |
+
},
|
| 1515 |
+
{
|
| 1516 |
+
"cell_type": "code",
|
| 1517 |
+
"execution_count": 43,
|
| 1518 |
+
"id": "3a6d7971",
|
| 1519 |
+
"metadata": {},
|
| 1520 |
+
"outputs": [
|
| 1521 |
+
{
|
| 1522 |
+
"data": {
|
| 1523 |
+
"text/plain": [
|
| 1524 |
+
"35251"
|
| 1525 |
+
]
|
| 1526 |
+
},
|
| 1527 |
+
"execution_count": 43,
|
| 1528 |
+
"metadata": {},
|
| 1529 |
+
"output_type": "execute_result"
|
| 1530 |
+
}
|
| 1531 |
+
],
|
| 1532 |
+
"source": [
|
| 1533 |
+
"len(combined_df)"
|
| 1534 |
+
]
|
| 1535 |
+
},
|
| 1536 |
+
{
|
| 1537 |
+
"cell_type": "code",
|
| 1538 |
+
"execution_count": 44,
|
| 1539 |
+
"id": "eaa4cbff",
|
| 1540 |
+
"metadata": {},
|
| 1541 |
+
"outputs": [],
|
| 1542 |
+
"source": [
|
| 1543 |
+
"X = combined_df.drop([\"y_true\"], axis =1 )\n",
|
| 1544 |
+
"y = combined_df[\"y_true\"]"
|
| 1545 |
+
]
|
| 1546 |
+
},
|
| 1547 |
+
{
|
| 1548 |
+
"cell_type": "code",
|
| 1549 |
+
"execution_count": 45,
|
| 1550 |
+
"id": "b7ed0d29",
|
| 1551 |
+
"metadata": {},
|
| 1552 |
+
"outputs": [
|
| 1553 |
+
{
|
| 1554 |
+
"name": "stdout",
|
| 1555 |
+
"output_type": "stream",
|
| 1556 |
+
"text": [
|
| 1557 |
+
"Logistic Regression:\n",
|
| 1558 |
+
"Accuracy: 0.7984683023684583\n",
|
| 1559 |
+
" precision recall f1-score support\n",
|
| 1560 |
+
"\n",
|
| 1561 |
+
" 0 0.76 0.79 0.77 792\n",
|
| 1562 |
+
" 1 0.69 0.71 0.70 709\n",
|
| 1563 |
+
" 2 0.84 0.82 0.83 729\n",
|
| 1564 |
+
" 3 0.76 0.88 0.82 830\n",
|
| 1565 |
+
" 4 0.84 0.74 0.79 486\n",
|
| 1566 |
+
" 5 0.87 0.88 0.88 680\n",
|
| 1567 |
+
" 6 0.96 0.98 0.97 803\n",
|
| 1568 |
+
" 7 0.71 0.66 0.68 634\n",
|
| 1569 |
+
" 8 0.70 0.57 0.63 704\n",
|
| 1570 |
+
" 9 0.83 0.88 0.86 684\n",
|
| 1571 |
+
"\n",
|
| 1572 |
+
" accuracy 0.80 7051\n",
|
| 1573 |
+
" macro avg 0.80 0.79 0.79 7051\n",
|
| 1574 |
+
"weighted avg 0.80 0.80 0.80 7051\n",
|
| 1575 |
+
"\n",
|
| 1576 |
+
"\n",
|
| 1577 |
+
"Decision Tree Classifier:\n"
|
| 1578 |
+
]
|
| 1579 |
+
},
|
| 1580 |
+
{
|
| 1581 |
+
"name": "stderr",
|
| 1582 |
+
"output_type": "stream",
|
| 1583 |
+
"text": [
|
| 1584 |
+
"/home/darth/.pyenv/versions/major02/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py:1247: FutureWarning: 'multi_class' was deprecated in version 1.5 and will be removed in 1.7. From then on, it will always use 'multinomial'. Leave it to its default value to avoid this warning.\n",
|
| 1585 |
+
" warnings.warn(\n"
|
| 1586 |
+
]
|
| 1587 |
+
},
|
| 1588 |
+
{
|
| 1589 |
+
"name": "stdout",
|
| 1590 |
+
"output_type": "stream",
|
| 1591 |
+
"text": [
|
| 1592 |
+
"Accuracy: 0.4355410580059566\n",
|
| 1593 |
+
" precision recall f1-score support\n",
|
| 1594 |
+
"\n",
|
| 1595 |
+
" 0 0.56 0.52 0.54 792\n",
|
| 1596 |
+
" 1 0.43 0.39 0.41 709\n",
|
| 1597 |
+
" 2 0.34 0.33 0.34 729\n",
|
| 1598 |
+
" 3 0.44 0.50 0.47 830\n",
|
| 1599 |
+
" 4 0.52 0.31 0.39 486\n",
|
| 1600 |
+
" 5 0.33 0.46 0.39 680\n",
|
| 1601 |
+
" 6 0.54 0.68 0.60 803\n",
|
| 1602 |
+
" 7 0.33 0.34 0.34 634\n",
|
| 1603 |
+
" 8 0.29 0.27 0.28 704\n",
|
| 1604 |
+
" 9 0.68 0.46 0.55 684\n",
|
| 1605 |
+
"\n",
|
| 1606 |
+
" accuracy 0.44 7051\n",
|
| 1607 |
+
" macro avg 0.45 0.43 0.43 7051\n",
|
| 1608 |
+
"weighted avg 0.45 0.44 0.43 7051\n",
|
| 1609 |
+
"\n",
|
| 1610 |
+
"\n",
|
| 1611 |
+
"Random Forest Classifier:\n",
|
| 1612 |
+
"Accuracy: 0.5664444759608566\n",
|
| 1613 |
+
" precision recall f1-score support\n",
|
| 1614 |
+
"\n",
|
| 1615 |
+
" 0 0.51 0.57 0.54 792\n",
|
| 1616 |
+
" 1 0.37 0.35 0.36 709\n",
|
| 1617 |
+
" 2 0.56 0.56 0.56 729\n",
|
| 1618 |
+
" 3 0.57 0.65 0.60 830\n",
|
| 1619 |
+
" 4 0.63 0.48 0.55 486\n",
|
| 1620 |
+
" 5 0.65 0.63 0.64 680\n",
|
| 1621 |
+
" 6 0.83 0.97 0.90 803\n",
|
| 1622 |
+
" 7 0.47 0.42 0.44 634\n",
|
| 1623 |
+
" 8 0.40 0.30 0.34 704\n",
|
| 1624 |
+
" 9 0.58 0.62 0.60 684\n",
|
| 1625 |
+
"\n",
|
| 1626 |
+
" accuracy 0.57 7051\n",
|
| 1627 |
+
" macro avg 0.56 0.56 0.55 7051\n",
|
| 1628 |
+
"weighted avg 0.56 0.57 0.56 7051\n",
|
| 1629 |
+
"\n",
|
| 1630 |
+
"\n",
|
| 1631 |
+
"Support Vector Machine (SVM):\n",
|
| 1632 |
+
"Accuracy: 0.7730818323642037\n",
|
| 1633 |
+
" precision recall f1-score support\n",
|
| 1634 |
+
"\n",
|
| 1635 |
+
" 0 0.73 0.76 0.74 792\n",
|
| 1636 |
+
" 1 0.64 0.70 0.67 709\n",
|
| 1637 |
+
" 2 0.81 0.80 0.80 729\n",
|
| 1638 |
+
" 3 0.73 0.86 0.79 830\n",
|
| 1639 |
+
" 4 0.81 0.64 0.72 486\n",
|
| 1640 |
+
" 5 0.88 0.85 0.86 680\n",
|
| 1641 |
+
" 6 0.94 0.99 0.96 803\n",
|
| 1642 |
+
" 7 0.70 0.67 0.69 634\n",
|
| 1643 |
+
" 8 0.66 0.53 0.59 704\n",
|
| 1644 |
+
" 9 0.82 0.86 0.84 684\n",
|
| 1645 |
+
"\n",
|
| 1646 |
+
" accuracy 0.77 7051\n",
|
| 1647 |
+
" macro avg 0.77 0.76 0.77 7051\n",
|
| 1648 |
+
"weighted avg 0.77 0.77 0.77 7051\n",
|
| 1649 |
+
"\n",
|
| 1650 |
+
"\n",
|
| 1651 |
+
"k-Nearest Neighbors (k-NN):\n",
|
| 1652 |
+
"Accuracy: 0.83562615231882\n",
|
| 1653 |
+
" precision recall f1-score support\n",
|
| 1654 |
+
"\n",
|
| 1655 |
+
" 0 0.81 0.86 0.83 792\n",
|
| 1656 |
+
" 1 0.71 0.77 0.74 709\n",
|
| 1657 |
+
" 2 0.88 0.86 0.87 729\n",
|
| 1658 |
+
" 3 0.82 0.91 0.86 830\n",
|
| 1659 |
+
" 4 0.87 0.77 0.81 486\n",
|
| 1660 |
+
" 5 0.91 0.88 0.90 680\n",
|
| 1661 |
+
" 6 0.97 0.99 0.98 803\n",
|
| 1662 |
+
" 7 0.75 0.75 0.75 634\n",
|
| 1663 |
+
" 8 0.73 0.62 0.67 704\n",
|
| 1664 |
+
" 9 0.90 0.89 0.90 684\n",
|
| 1665 |
+
"\n",
|
| 1666 |
+
" accuracy 0.84 7051\n",
|
| 1667 |
+
" macro avg 0.84 0.83 0.83 7051\n",
|
| 1668 |
+
"weighted avg 0.84 0.84 0.83 7051\n",
|
| 1669 |
+
"\n",
|
| 1670 |
+
"\n",
|
| 1671 |
+
"Naïve Bayes Classifier:\n",
|
| 1672 |
+
"Accuracy: 0.882428024393703\n",
|
| 1673 |
+
" precision recall f1-score support\n",
|
| 1674 |
+
"\n",
|
| 1675 |
+
" 0 0.90 0.85 0.88 792\n",
|
| 1676 |
+
" 1 0.78 0.82 0.80 709\n",
|
| 1677 |
+
" 2 0.93 0.88 0.91 729\n",
|
| 1678 |
+
" 3 0.90 0.91 0.90 830\n",
|
| 1679 |
+
" 4 0.88 0.84 0.86 486\n",
|
| 1680 |
+
" 5 0.93 0.94 0.93 680\n",
|
| 1681 |
+
" 6 0.98 0.99 0.98 803\n",
|
| 1682 |
+
" 7 0.81 0.83 0.82 634\n",
|
| 1683 |
+
" 8 0.77 0.80 0.78 704\n",
|
| 1684 |
+
" 9 0.94 0.92 0.93 684\n",
|
| 1685 |
+
"\n",
|
| 1686 |
+
" accuracy 0.88 7051\n",
|
| 1687 |
+
" macro avg 0.88 0.88 0.88 7051\n",
|
| 1688 |
+
"weighted avg 0.88 0.88 0.88 7051\n",
|
| 1689 |
+
"\n"
|
| 1690 |
+
]
|
| 1691 |
+
}
|
| 1692 |
+
],
|
| 1693 |
+
"source": [
|
| 1694 |
+
"from models import *\n",
|
| 1695 |
+
"print(\"Logistic Regression:\")\n",
|
| 1696 |
+
"report, acc = logistic_regression(X, y)\n",
|
| 1697 |
+
"print(\"Accuracy:\", acc)\n",
|
| 1698 |
+
"print(report)\n",
|
| 1699 |
+
"\n",
|
| 1700 |
+
"print(\"\\nDecision Tree Classifier:\")\n",
|
| 1701 |
+
"report, acc = decision_tree(X, y)\n",
|
| 1702 |
+
"print(\"Accuracy:\", acc)\n",
|
| 1703 |
+
"print(report)\n",
|
| 1704 |
+
"\n",
|
| 1705 |
+
"print(\"\\nRandom Forest Classifier:\")\n",
|
| 1706 |
+
"report, acc = random_forest(X, y)\n",
|
| 1707 |
+
"print(\"Accuracy:\", acc)\n",
|
| 1708 |
+
"print(report)\n",
|
| 1709 |
+
"\n",
|
| 1710 |
+
"print(\"\\nSupport Vector Machine (SVM):\")\n",
|
| 1711 |
+
"report, acc = support_vector_machine(X, y)\n",
|
| 1712 |
+
"print(\"Accuracy:\", acc)\n",
|
| 1713 |
+
"print(report)\n",
|
| 1714 |
+
"\n",
|
| 1715 |
+
"print(\"\\nk-Nearest Neighbors (k-NN):\")\n",
|
| 1716 |
+
"report, acc = knn(X, y, k=5)\n",
|
| 1717 |
+
"print(\"Accuracy:\", acc)\n",
|
| 1718 |
+
"print(report)\n",
|
| 1719 |
+
"\n",
|
| 1720 |
+
"print(\"\\nNaïve Bayes Classifier:\")\n",
|
| 1721 |
+
"report, acc = naive_bayes(X, y)\n",
|
| 1722 |
+
"print(\"Accuracy:\", acc)\n",
|
| 1723 |
+
"print(report)\n",
|
| 1724 |
+
"\n",
|
| 1725 |
+
"# print(\"\\nGradient Boosting (XGBoost):\")\n",
|
| 1726 |
+
"# report, acc = xgboost_classifier(X, y)\n",
|
| 1727 |
+
"# print(\"Accuracy:\", acc)\n",
|
| 1728 |
+
"# print(report)\n",
|
| 1729 |
+
"\n",
|
| 1730 |
+
"# print(\"\\nMulti-Layer Perceptron (MLP - Neural Network):\")\n",
|
| 1731 |
+
"# report, acc = mlp_classifier(X, y)\n",
|
| 1732 |
+
"\n",
|
| 1733 |
+
"# print(\"Accuracy:\", acc)\n",
|
| 1734 |
+
"# print(report)\n"
|
| 1735 |
+
]
|
| 1736 |
+
},
|
| 1737 |
+
{
|
| 1738 |
+
"cell_type": "code",
|
| 1739 |
+
"execution_count": null,
|
| 1740 |
+
"id": "fd8e808d",
|
| 1741 |
+
"metadata": {},
|
| 1742 |
+
"outputs": [],
|
| 1743 |
+
"source": []
|
| 1744 |
+
}
|
| 1745 |
+
],
|
| 1746 |
+
"metadata": {
|
| 1747 |
+
"kernelspec": {
|
| 1748 |
+
"display_name": "major02",
|
| 1749 |
+
"language": "python",
|
| 1750 |
+
"name": "python3"
|
| 1751 |
+
},
|
| 1752 |
+
"language_info": {
|
| 1753 |
+
"codemirror_mode": {
|
| 1754 |
+
"name": "ipython",
|
| 1755 |
+
"version": 3
|
| 1756 |
+
},
|
| 1757 |
+
"file_extension": ".py",
|
| 1758 |
+
"mimetype": "text/x-python",
|
| 1759 |
+
"name": "python",
|
| 1760 |
+
"nbconvert_exporter": "python",
|
| 1761 |
+
"pygments_lexer": "ipython3",
|
| 1762 |
+
"version": "3.10.12"
|
| 1763 |
+
}
|
| 1764 |
+
},
|
| 1765 |
+
"nbformat": 4,
|
| 1766 |
+
"nbformat_minor": 5
|
| 1767 |
+
}
|
major-mid-report.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5f7088e3b28b5c14889d50b88b9ad40e489e55f2529690b23e04b0378a5c39fd
|
| 3 |
+
size 653546
|
models.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from sklearn.model_selection import train_test_split
|
| 4 |
+
from sklearn.preprocessing import StandardScaler
|
| 5 |
+
from sklearn.metrics import classification_report, accuracy_score
|
| 6 |
+
|
| 7 |
+
# Importing Classifiers
|
| 8 |
+
from sklearn.linear_model import LogisticRegression
|
| 9 |
+
from sklearn.tree import DecisionTreeClassifier
|
| 10 |
+
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
|
| 11 |
+
from sklearn.svm import SVC
|
| 12 |
+
from sklearn.neighbors import KNeighborsClassifier
|
| 13 |
+
from sklearn.naive_bayes import MultinomialNB
|
| 14 |
+
from sklearn.neural_network import MLPClassifier
|
| 15 |
+
from xgboost import XGBClassifier
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Function to preprocess data (optional scaling for certain models)
def preprocess_data(X, scale=False):
    """Optionally standardize a feature matrix.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Feature matrix.
    scale : bool, default False
        When True, fit a StandardScaler and return the transformed data;
        otherwise X is returned untouched.

    Returns
    -------
    array-like
        The (possibly standardized) feature matrix.
    """
    if not scale:
        return X
    return StandardScaler().fit_transform(X)
|
| 24 |
+
|
| 25 |
+
# Logistic Regression
def logistic_regression(X, y):
    """Train and evaluate a multinomial logistic-regression classifier.

    Parameters
    ----------
    X : array-like or sparse matrix of shape (n_samples, n_features)
        Feature matrix.
    y : array-like of shape (n_samples,)
        Class labels.

    Returns
    -------
    tuple(str, float)
        (classification_report, accuracy) computed on a held-out 20% test
        split (random_state=42 for reproducibility).
    """
    # X = preprocess_data(X, scale=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # NOTE: 'multi_class' was deprecated in scikit-learn 1.5 and removed in
    # 1.7; the lbfgs solver fits a multinomial model by default, so dropping
    # the argument keeps identical behavior and silences the FutureWarning
    # that this call previously emitted.
    model = LogisticRegression(solver='lbfgs', max_iter=1000)
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)
|
| 35 |
+
|
| 36 |
+
# Decision Tree Classifier
def decision_tree(X, y):
    """Fit a decision tree on an 80/20 split and report test performance.

    Returns a (classification_report, accuracy) pair for the held-out 20%
    test set (random_state=42 for a reproducible split).
    """
    feats_train, feats_test, labels_train, labels_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    clf = DecisionTreeClassifier()
    clf.fit(feats_train, labels_train)

    predictions = clf.predict(feats_test)
    report = classification_report(labels_test, predictions)
    accuracy = accuracy_score(labels_test, predictions)
    return report, accuracy
|
| 45 |
+
|
| 46 |
+
# Random Forest Classifier
def random_forest(X, y):
    """Fit a 100-tree random forest on an 80/20 split and evaluate it.

    Returns a (classification_report, accuracy) pair for the held-out 20%
    test set (both the split and the forest use random_state=42).
    """
    feats_train, feats_test, labels_train, labels_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    forest = RandomForestClassifier(n_estimators=100, random_state=42)
    forest.fit(feats_train, labels_train)

    predictions = forest.predict(feats_test)
    report = classification_report(labels_test, predictions)
    accuracy = accuracy_score(labels_test, predictions)
    return report, accuracy
|
| 55 |
+
|
| 56 |
+
# Support Vector Machine (SVM)
def support_vector_machine(X, y):
    """Fit an RBF-kernel SVM on an 80/20 split and evaluate it.

    probability=True enables predict_proba (at extra training cost via
    internal cross-validation). Returns a (classification_report, accuracy)
    pair for the held-out 20% test set.
    """
    feats_train, feats_test, labels_train, labels_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    svm = SVC(kernel='rbf', probability=True)
    svm.fit(feats_train, labels_train)

    predictions = svm.predict(feats_test)
    report = classification_report(labels_test, predictions)
    accuracy = accuracy_score(labels_test, predictions)
    return report, accuracy
|
| 65 |
+
|
| 66 |
+
# k-Nearest Neighbors (k-NN)
def knn(X, y, k=5):
    """Fit a k-nearest-neighbors classifier on an 80/20 split.

    Parameters
    ----------
    k : int, default 5
        Number of neighbors to consider.

    Returns a (classification_report, accuracy) pair for the held-out 20%
    test set (random_state=42 for a reproducible split).
    """
    # X = preprocess_data(X, scale=True)
    feats_train, feats_test, labels_train, labels_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    neigh = KNeighborsClassifier(n_neighbors=k)
    neigh.fit(feats_train, labels_train)

    predictions = neigh.predict(feats_test)
    report = classification_report(labels_test, predictions)
    accuracy = accuracy_score(labels_test, predictions)
    return report, accuracy
|
| 76 |
+
|
| 77 |
+
# Naïve Bayes
def naive_bayes(X, y):
    """Fit a multinomial naive-Bayes classifier on an 80/20 split.

    NOTE: MultinomialNB requires non-negative feature values (e.g. counts
    or TF-IDF weights). Returns a (classification_report, accuracy) pair
    for the held-out 20% test set.
    """
    feats_train, feats_test, labels_train, labels_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    nb = MultinomialNB()
    nb.fit(feats_train, labels_train)

    predictions = nb.predict(feats_test)
    report = classification_report(labels_test, predictions)
    accuracy = accuracy_score(labels_test, predictions)
    return report, accuracy
|
| 87 |
+
|
| 88 |
+
# Gradient Boosting Classifier (XGBoost)
def xgboost_classifier(X, y):
    """Train and evaluate an XGBoost multi-class classifier.

    Parameters
    ----------
    X : array-like or sparse matrix of shape (n_samples, n_features)
        Feature matrix.
    y : array-like of shape (n_samples,)
        Integer class labels.

    Returns
    -------
    tuple(str, float)
        (classification_report, accuracy) computed on a held-out 20% test
        split (random_state=42 for reproducibility).
    """
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # NOTE: 'use_label_encoder' has been a deprecated no-op since
    # XGBoost 1.7 and was removed in 2.x; passing it triggers warnings or
    # errors on current releases, so it is omitted here.
    model = XGBClassifier(eval_metric='mlogloss')
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)
|
| 97 |
+
|
| 98 |
+
# Multi-Layer Perceptron (MLP - Neural Network)
|
| 99 |
+
# def mlp_classifier(X, y):
|
| 100 |
+
# # X = preprocess_data(X, scale=True)
|
| 101 |
+
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
|
| 102 |
+
|
| 103 |
+
# model = MLPClassifier(hidden_layer_sizes=(100, 50), max_iter=500, random_state=42)
|
| 104 |
+
# model.fit(X_train, y_train)
|
| 105 |
+
|
| 106 |
+
# y_pred = model.predict(X_test)
|
| 107 |
+
# return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)
|
| 108 |
+
import torch
|
| 109 |
+
import torch.nn as nn
|
| 110 |
+
import torch.optim as optim
|
| 111 |
+
from sklearn.model_selection import train_test_split
|
| 112 |
+
from sklearn.preprocessing import StandardScaler
|
| 113 |
+
from sklearn.metrics import classification_report, accuracy_score
|
| 114 |
+
|
| 115 |
+
def mlp_classifier(X, y, epochs=500, lr=0.01):
    """Train a two-hidden-layer MLP (PyTorch) and evaluate on a 20% split.

    Parameters
    ----------
    X : array-like or scipy sparse matrix of shape (n_samples, n_features)
        Feature matrix. Sparse input is densified after the split.
    y : array-like of shape (n_samples,)
        Integer, zero-based class labels (CrossEntropyLoss requires
        targets in [0, n_classes - 1]).
    epochs : int, default 500
        Number of full-batch gradient steps.
    lr : float, default 0.01
        Adam learning rate.

    Returns
    -------
    tuple(str, float)
        (classification_report, accuracy) on the held-out 20% test set.
    """
    # Normalize without centering so sparse matrices stay sparse.
    scaler = StandardScaler(with_mean=False)
    X = scaler.fit_transform(X)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # Densify only when the split is actually sparse: plain ndarrays have
    # no .toarray(), which previously crashed this function on dense input.
    def _dense(m):
        return m.toarray() if hasattr(m, 'toarray') else np.asarray(m)

    X_train = torch.tensor(_dense(X_train), dtype=torch.float32)
    X_test = torch.tensor(_dense(X_test), dtype=torch.float32)
    # np.asarray handles pandas Series, lists and ndarrays uniformly
    # (torch.tensor on a raw Series is version-dependent).
    y_train = torch.tensor(np.asarray(y_train), dtype=torch.long)
    y_test = torch.tensor(np.asarray(y_test), dtype=torch.long)

    # Move to GPU if available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    X_train, X_test = X_train.to(device), X_test.to(device)
    y_train, y_test = y_train.to(device), y_test.to(device)

    # Simple feed-forward network: input -> 100 -> 50 -> n_classes.
    class MLP(nn.Module):
        def __init__(self, input_size, hidden_sizes, output_size):
            super(MLP, self).__init__()
            self.fc1 = nn.Linear(input_size, hidden_sizes[0])
            self.fc2 = nn.Linear(hidden_sizes[0], hidden_sizes[1])
            self.fc3 = nn.Linear(hidden_sizes[1], output_size)
            self.relu = nn.ReLU()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            return self.fc3(x)  # raw logits; CrossEntropyLoss applies softmax

    # Size the output layer by the largest label, not len(set(y)):
    # with gapped label sets (e.g. {0, 2, 5}) the old len(set(y)) value
    # under-sized the head and CrossEntropyLoss raised at train time.
    # Assumes integer, zero-based labels — as used elsewhere in this project.
    n_classes = int(np.asarray(y).max()) + 1
    model = MLP(input_size=X.shape[1], hidden_sizes=[100, 50], output_size=n_classes).to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    # Full-batch training (no mini-batching), matching the original design.
    for epoch in range(epochs):
        model.train()
        optimizer.zero_grad()
        outputs = model(X_train)
        loss = criterion(outputs, y_train)
        loss.backward()
        optimizer.step()

    # Evaluate on the held-out split.
    model.eval()
    with torch.no_grad():
        logits = model(X_test)
        y_pred = torch.argmax(logits, dim=1).cpu().numpy()

    y_true = y_test.cpu().numpy()
    return classification_report(y_true, y_pred), accuracy_score(y_true, y_pred)
|
| 168 |
+
|
| 169 |
+
# Example usage:
|
| 170 |
+
# report, acc = logistic_regression(X, y)
|
| 171 |
+
# print("Accuracy:", acc)
|
| 172 |
+
# print(report)
|
multi-label-processing.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
multi_label_models.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sklearn.multioutput import MultiOutputClassifier
|
| 2 |
+
from sklearn.multiclass import OneVsRestClassifier
|
| 3 |
+
from sklearn.metrics import classification_report, accuracy_score
|
| 4 |
+
from sklearn.model_selection import train_test_split
|
| 5 |
+
from sklearn.linear_model import LogisticRegression
|
| 6 |
+
from sklearn.tree import DecisionTreeClassifier
|
| 7 |
+
from sklearn.ensemble import RandomForestClassifier
|
| 8 |
+
from sklearn.neighbors import KNeighborsClassifier
|
| 9 |
+
from sklearn.naive_bayes import MultinomialNB
|
| 10 |
+
from sklearn.svm import SVC
|
| 11 |
+
from xgboost import XGBClassifier
|
| 12 |
+
from sklearn.neural_network import MLPClassifier
|
| 13 |
+
import numpy as np
|
| 14 |
+
import pandas as pd
|
| 15 |
+
|
| 16 |
+
# Logistic Regression (use OneVsRest)
def multilabel_logistic_regression(X, y):
    """Multi-label logistic regression via one-vs-rest binary classifiers.

    Returns a (classification_report, accuracy) pair for the held-out 20%
    test set; accuracy here is subset accuracy (exact label-set match).
    """
    feats_train, feats_test, labels_train, labels_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    clf = OneVsRestClassifier(LogisticRegression(solver='lbfgs', max_iter=1000))
    clf.fit(feats_train, labels_train)

    predictions = clf.predict(feats_test)
    report = classification_report(labels_test, predictions)
    accuracy = accuracy_score(labels_test, predictions)
    return report, accuracy
|
| 25 |
+
|
| 26 |
+
# Decision Tree
def multilabel_decision_tree(X, y):
    """Multi-label classification with one decision tree per output label.

    Returns a (classification_report, accuracy) pair for the held-out 20%
    test set; accuracy here is subset accuracy (exact label-set match).
    """
    feats_train, feats_test, labels_train, labels_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    clf = MultiOutputClassifier(DecisionTreeClassifier())
    clf.fit(feats_train, labels_train)

    predictions = clf.predict(feats_test)
    report = classification_report(labels_test, predictions)
    accuracy = accuracy_score(labels_test, predictions)
    return report, accuracy
|
| 35 |
+
|
| 36 |
+
# Random Forest
def multilabel_random_forest(X, y):
    """Multi-label classification with one random forest per output label.

    Returns a (classification_report, accuracy) pair for the held-out 20%
    test set; accuracy here is subset accuracy (exact label-set match).
    """
    feats_train, feats_test, labels_train, labels_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    clf = MultiOutputClassifier(RandomForestClassifier(n_estimators=100, random_state=42))
    clf.fit(feats_train, labels_train)

    predictions = clf.predict(feats_test)
    report = classification_report(labels_test, predictions)
    accuracy = accuracy_score(labels_test, predictions)
    return report, accuracy
|
| 45 |
+
|
| 46 |
+
# SVM (with OneVsRest)
|
| 47 |
+
def multilabel_svm(X, y):
    """Train a one-vs-rest RBF-kernel SVM multi-label classifier.

    Uses the common 80/20 split (seed 42) and returns the held-out
    classification report together with the subset-accuracy score.
    """
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # probability=True enables predict_proba (slower fit, same labels)
    svm_clf = OneVsRestClassifier(SVC(kernel='rbf', probability=True))
    svm_clf.fit(X_tr, y_tr)

    predicted = svm_clf.predict(X_te)
    return classification_report(y_te, predicted), accuracy_score(y_te, predicted)
|
| 55 |
+
|
| 56 |
+
# k-NN (KNeighborsClassifier supports multi-label directly)
|
| 57 |
+
def multilabel_knn(X, y, k=5):
    """k-nearest-neighbours multi-label classifier.

    KNeighborsClassifier handles multi-label targets natively, so no
    wrapper is needed. Splits 80/20 (seed 42) and returns the test-set
    classification report plus subset accuracy.

    k: number of neighbours to vote with (default 5).
    """
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_tr, y_tr)

    predicted = knn.predict(X_te)
    return classification_report(y_te, predicted), accuracy_score(y_te, predicted)
|
| 65 |
+
|
| 66 |
+
# Naive Bayes (MultinomialNB with OneVsRest)
|
| 67 |
+
def multilabel_naive_bayes(X, y):
    """One-vs-rest multinomial naive Bayes for multi-label data.

    NOTE(review): MultinomialNB requires non-negative features (e.g.
    counts or TF-IDF) — presumably satisfied by the callers; verify.
    Splits 80/20 (seed 42) and returns (classification_report,
    subset accuracy) on the held-out part.
    """
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    nb = OneVsRestClassifier(MultinomialNB())
    nb.fit(X_tr, y_tr)

    predicted = nb.predict(X_te)
    return classification_report(y_te, predicted), accuracy_score(y_te, predicted)
|
| 75 |
+
|
| 76 |
+
# XGBoost (with OneVsRest)
|
| 77 |
+
def multilabel_xgboost(X, y):
    """One-vs-rest gradient-boosted trees (XGBoost) for multi-label data.

    Splits (X, y) 80/20 with seed 42 and returns the test-set
    classification report plus the subset-accuracy score.
    """
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # NOTE(review): use_label_encoder is deprecated in newer xgboost
    # releases and only emits a warning there — kept as-is since the
    # pinned xgboost version is not visible from this file; confirm.
    booster = OneVsRestClassifier(
        XGBClassifier(use_label_encoder=False, eval_metric='logloss')
    )
    booster.fit(X_tr, y_tr)

    predicted = booster.predict(X_te)
    return classification_report(y_te, predicted), accuracy_score(y_te, predicted)
|
| 85 |
+
|
| 86 |
+
# MLP (Neural Net)
|
| 87 |
+
def multilabel_mlp(X, y):
    """Per-label multilayer-perceptron classifier (one MLP per label).

    Uses the shared 80/20 split (seed 42), a single hidden layer of
    100 units, and up to 500 training iterations. Returns the held-out
    classification report together with the subset-accuracy score.
    """
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    net = MultiOutputClassifier(
        MLPClassifier(hidden_layer_sizes=(100,), max_iter=500)
    )
    net.fit(X_tr, y_tr)

    predicted = net.predict(X_te)
    return classification_report(y_te, predicted), accuracy_score(y_te, predicted)
|
outputs/checkpoint-60/README.md
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
base_model: unsloth/meta-llama-3.1-8b-unsloth-bnb-4bit
|
| 3 |
+
library_name: peft
|
| 4 |
+
---
|
| 5 |
+
|
| 6 |
+
# Model Card for Model ID
|
| 7 |
+
|
| 8 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
## Model Details
|
| 13 |
+
|
| 14 |
+
### Model Description
|
| 15 |
+
|
| 16 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
- **Developed by:** [More Information Needed]
|
| 21 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 22 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 23 |
+
- **Model type:** [More Information Needed]
|
| 24 |
+
- **Language(s) (NLP):** [More Information Needed]
|
| 25 |
+
- **License:** [More Information Needed]
|
| 26 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
| 27 |
+
|
| 28 |
+
### Model Sources [optional]
|
| 29 |
+
|
| 30 |
+
<!-- Provide the basic links for the model. -->
|
| 31 |
+
|
| 32 |
+
- **Repository:** [More Information Needed]
|
| 33 |
+
- **Paper [optional]:** [More Information Needed]
|
| 34 |
+
- **Demo [optional]:** [More Information Needed]
|
| 35 |
+
|
| 36 |
+
## Uses
|
| 37 |
+
|
| 38 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 39 |
+
|
| 40 |
+
### Direct Use
|
| 41 |
+
|
| 42 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 43 |
+
|
| 44 |
+
[More Information Needed]
|
| 45 |
+
|
| 46 |
+
### Downstream Use [optional]
|
| 47 |
+
|
| 48 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 49 |
+
|
| 50 |
+
[More Information Needed]
|
| 51 |
+
|
| 52 |
+
### Out-of-Scope Use
|
| 53 |
+
|
| 54 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 55 |
+
|
| 56 |
+
[More Information Needed]
|
| 57 |
+
|
| 58 |
+
## Bias, Risks, and Limitations
|
| 59 |
+
|
| 60 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 61 |
+
|
| 62 |
+
[More Information Needed]
|
| 63 |
+
|
| 64 |
+
### Recommendations
|
| 65 |
+
|
| 66 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 67 |
+
|
| 68 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
| 69 |
+
|
| 70 |
+
## How to Get Started with the Model
|
| 71 |
+
|
| 72 |
+
Use the code below to get started with the model.
|
| 73 |
+
|
| 74 |
+
[More Information Needed]
|
| 75 |
+
|
| 76 |
+
## Training Details
|
| 77 |
+
|
| 78 |
+
### Training Data
|
| 79 |
+
|
| 80 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 81 |
+
|
| 82 |
+
[More Information Needed]
|
| 83 |
+
|
| 84 |
+
### Training Procedure
|
| 85 |
+
|
| 86 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 87 |
+
|
| 88 |
+
#### Preprocessing [optional]
|
| 89 |
+
|
| 90 |
+
[More Information Needed]
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
#### Training Hyperparameters
|
| 94 |
+
|
| 95 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 96 |
+
|
| 97 |
+
#### Speeds, Sizes, Times [optional]
|
| 98 |
+
|
| 99 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
| 100 |
+
|
| 101 |
+
[More Information Needed]
|
| 102 |
+
|
| 103 |
+
## Evaluation
|
| 104 |
+
|
| 105 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
| 106 |
+
|
| 107 |
+
### Testing Data, Factors & Metrics
|
| 108 |
+
|
| 109 |
+
#### Testing Data
|
| 110 |
+
|
| 111 |
+
<!-- This should link to a Dataset Card if possible. -->
|
| 112 |
+
|
| 113 |
+
[More Information Needed]
|
| 114 |
+
|
| 115 |
+
#### Factors
|
| 116 |
+
|
| 117 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
| 118 |
+
|
| 119 |
+
[More Information Needed]
|
| 120 |
+
|
| 121 |
+
#### Metrics
|
| 122 |
+
|
| 123 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
| 124 |
+
|
| 125 |
+
[More Information Needed]
|
| 126 |
+
|
| 127 |
+
### Results
|
| 128 |
+
|
| 129 |
+
[More Information Needed]
|
| 130 |
+
|
| 131 |
+
#### Summary
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
## Model Examination [optional]
|
| 136 |
+
|
| 137 |
+
<!-- Relevant interpretability work for the model goes here -->
|
| 138 |
+
|
| 139 |
+
[More Information Needed]
|
| 140 |
+
|
| 141 |
+
## Environmental Impact
|
| 142 |
+
|
| 143 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
| 144 |
+
|
| 145 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
| 146 |
+
|
| 147 |
+
- **Hardware Type:** [More Information Needed]
|
| 148 |
+
- **Hours used:** [More Information Needed]
|
| 149 |
+
- **Cloud Provider:** [More Information Needed]
|
| 150 |
+
- **Compute Region:** [More Information Needed]
|
| 151 |
+
- **Carbon Emitted:** [More Information Needed]
|
| 152 |
+
|
| 153 |
+
## Technical Specifications [optional]
|
| 154 |
+
|
| 155 |
+
### Model Architecture and Objective
|
| 156 |
+
|
| 157 |
+
[More Information Needed]
|
| 158 |
+
|
| 159 |
+
### Compute Infrastructure
|
| 160 |
+
|
| 161 |
+
[More Information Needed]
|
| 162 |
+
|
| 163 |
+
#### Hardware
|
| 164 |
+
|
| 165 |
+
[More Information Needed]
|
| 166 |
+
|
| 167 |
+
#### Software
|
| 168 |
+
|
| 169 |
+
[More Information Needed]
|
| 170 |
+
|
| 171 |
+
## Citation [optional]
|
| 172 |
+
|
| 173 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
| 174 |
+
|
| 175 |
+
**BibTeX:**
|
| 176 |
+
|
| 177 |
+
[More Information Needed]
|
| 178 |
+
|
| 179 |
+
**APA:**
|
| 180 |
+
|
| 181 |
+
[More Information Needed]
|
| 182 |
+
|
| 183 |
+
## Glossary [optional]
|
| 184 |
+
|
| 185 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
| 186 |
+
|
| 187 |
+
[More Information Needed]
|
| 188 |
+
|
| 189 |
+
## More Information [optional]
|
| 190 |
+
|
| 191 |
+
[More Information Needed]
|
| 192 |
+
|
| 193 |
+
## Model Card Authors [optional]
|
| 194 |
+
|
| 195 |
+
[More Information Needed]
|
| 196 |
+
|
| 197 |
+
## Model Card Contact
|
| 198 |
+
|
| 199 |
+
[More Information Needed]
|
| 200 |
+
### Framework versions
|
| 201 |
+
|
| 202 |
+
- PEFT 0.15.2
|
outputs/checkpoint-60/adapter_config.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "unsloth/meta-llama-3.1-8b-unsloth-bnb-4bit",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"corda_config": null,
|
| 7 |
+
"eva_config": null,
|
| 8 |
+
"exclude_modules": null,
|
| 9 |
+
"fan_in_fan_out": false,
|
| 10 |
+
"inference_mode": true,
|
| 11 |
+
"init_lora_weights": true,
|
| 12 |
+
"layer_replication": null,
|
| 13 |
+
"layers_pattern": null,
|
| 14 |
+
"layers_to_transform": null,
|
| 15 |
+
"loftq_config": {},
|
| 16 |
+
"lora_alpha": 16,
|
| 17 |
+
"lora_bias": false,
|
| 18 |
+
"lora_dropout": 0,
|
| 19 |
+
"megatron_config": null,
|
| 20 |
+
"megatron_core": "megatron.core",
|
| 21 |
+
"modules_to_save": null,
|
| 22 |
+
"peft_type": "LORA",
|
| 23 |
+
"r": 16,
|
| 24 |
+
"rank_pattern": {},
|
| 25 |
+
"revision": null,
|
| 26 |
+
"target_modules": [
|
| 27 |
+
"k_proj",
|
| 28 |
+
"o_proj",
|
| 29 |
+
"down_proj",
|
| 30 |
+
"v_proj",
|
| 31 |
+
"up_proj",
|
| 32 |
+
"q_proj",
|
| 33 |
+
"gate_proj"
|
| 34 |
+
],
|
| 35 |
+
"task_type": "CAUSAL_LM",
|
| 36 |
+
"trainable_token_indices": null,
|
| 37 |
+
"use_dora": false,
|
| 38 |
+
"use_rslora": false
|
| 39 |
+
}
|
outputs/checkpoint-60/adapter_model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b96d91d105affb385522c940a8c7ffa656f5594b0e113a6200b057641770c069
|
| 3 |
+
size 167832240
|
outputs/checkpoint-60/optimizer.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:031e4ccc69cdd32f767fafd301dc1b9ce1b6a310f13f97621cd0e54df79b2e14
|
| 3 |
+
size 85723685
|
outputs/checkpoint-60/rng_state.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f1d565802a8e26c4e8a31328752b7a7fdc186d9401aa008e65697d0ad8c22e33
|
| 3 |
+
size 14645
|
outputs/checkpoint-60/scheduler.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8fd7a3e7c36d3a0c58b62d5ad1ac54d1ff59b338836f3a1c760238fa25a50039
|
| 3 |
+
size 1465
|
outputs/checkpoint-60/special_tokens_map.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bos_token": {
|
| 3 |
+
"content": "<|begin_of_text|>",
|
| 4 |
+
"lstrip": false,
|
| 5 |
+
"normalized": false,
|
| 6 |
+
"rstrip": false,
|
| 7 |
+
"single_word": false
|
| 8 |
+
},
|
| 9 |
+
"eos_token": {
|
| 10 |
+
"content": "<|end_of_text|>",
|
| 11 |
+
"lstrip": false,
|
| 12 |
+
"normalized": false,
|
| 13 |
+
"rstrip": false,
|
| 14 |
+
"single_word": false
|
| 15 |
+
},
|
| 16 |
+
"pad_token": {
|
| 17 |
+
"content": "<|finetune_right_pad_id|>",
|
| 18 |
+
"lstrip": false,
|
| 19 |
+
"normalized": false,
|
| 20 |
+
"rstrip": false,
|
| 21 |
+
"single_word": false
|
| 22 |
+
}
|
| 23 |
+
}
|
outputs/checkpoint-60/tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
|
| 3 |
+
size 17209920
|
outputs/checkpoint-60/tokenizer_config.json
ADDED
|
@@ -0,0 +1,2066 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"add_bos_token": true,
|
| 3 |
+
"added_tokens_decoder": {
|
| 4 |
+
"128000": {
|
| 5 |
+
"content": "<|begin_of_text|>",
|
| 6 |
+
"lstrip": false,
|
| 7 |
+
"normalized": false,
|
| 8 |
+
"rstrip": false,
|
| 9 |
+
"single_word": false,
|
| 10 |
+
"special": true
|
| 11 |
+
},
|
| 12 |
+
"128001": {
|
| 13 |
+
"content": "<|end_of_text|>",
|
| 14 |
+
"lstrip": false,
|
| 15 |
+
"normalized": false,
|
| 16 |
+
"rstrip": false,
|
| 17 |
+
"single_word": false,
|
| 18 |
+
"special": true
|
| 19 |
+
},
|
| 20 |
+
"128002": {
|
| 21 |
+
"content": "<|reserved_special_token_0|>",
|
| 22 |
+
"lstrip": false,
|
| 23 |
+
"normalized": false,
|
| 24 |
+
"rstrip": false,
|
| 25 |
+
"single_word": false,
|
| 26 |
+
"special": true
|
| 27 |
+
},
|
| 28 |
+
"128003": {
|
| 29 |
+
"content": "<|reserved_special_token_1|>",
|
| 30 |
+
"lstrip": false,
|
| 31 |
+
"normalized": false,
|
| 32 |
+
"rstrip": false,
|
| 33 |
+
"single_word": false,
|
| 34 |
+
"special": true
|
| 35 |
+
},
|
| 36 |
+
"128004": {
|
| 37 |
+
"content": "<|finetune_right_pad_id|>",
|
| 38 |
+
"lstrip": false,
|
| 39 |
+
"normalized": false,
|
| 40 |
+
"rstrip": false,
|
| 41 |
+
"single_word": false,
|
| 42 |
+
"special": true
|
| 43 |
+
},
|
| 44 |
+
"128005": {
|
| 45 |
+
"content": "<|reserved_special_token_2|>",
|
| 46 |
+
"lstrip": false,
|
| 47 |
+
"normalized": false,
|
| 48 |
+
"rstrip": false,
|
| 49 |
+
"single_word": false,
|
| 50 |
+
"special": true
|
| 51 |
+
},
|
| 52 |
+
"128006": {
|
| 53 |
+
"content": "<|start_header_id|>",
|
| 54 |
+
"lstrip": false,
|
| 55 |
+
"normalized": false,
|
| 56 |
+
"rstrip": false,
|
| 57 |
+
"single_word": false,
|
| 58 |
+
"special": true
|
| 59 |
+
},
|
| 60 |
+
"128007": {
|
| 61 |
+
"content": "<|end_header_id|>",
|
| 62 |
+
"lstrip": false,
|
| 63 |
+
"normalized": false,
|
| 64 |
+
"rstrip": false,
|
| 65 |
+
"single_word": false,
|
| 66 |
+
"special": true
|
| 67 |
+
},
|
| 68 |
+
"128008": {
|
| 69 |
+
"content": "<|eom_id|>",
|
| 70 |
+
"lstrip": false,
|
| 71 |
+
"normalized": false,
|
| 72 |
+
"rstrip": false,
|
| 73 |
+
"single_word": false,
|
| 74 |
+
"special": true
|
| 75 |
+
},
|
| 76 |
+
"128009": {
|
| 77 |
+
"content": "<|eot_id|>",
|
| 78 |
+
"lstrip": false,
|
| 79 |
+
"normalized": false,
|
| 80 |
+
"rstrip": false,
|
| 81 |
+
"single_word": false,
|
| 82 |
+
"special": true
|
| 83 |
+
},
|
| 84 |
+
"128010": {
|
| 85 |
+
"content": "<|python_tag|>",
|
| 86 |
+
"lstrip": false,
|
| 87 |
+
"normalized": false,
|
| 88 |
+
"rstrip": false,
|
| 89 |
+
"single_word": false,
|
| 90 |
+
"special": true
|
| 91 |
+
},
|
| 92 |
+
"128011": {
|
| 93 |
+
"content": "<|reserved_special_token_3|>",
|
| 94 |
+
"lstrip": false,
|
| 95 |
+
"normalized": false,
|
| 96 |
+
"rstrip": false,
|
| 97 |
+
"single_word": false,
|
| 98 |
+
"special": true
|
| 99 |
+
},
|
| 100 |
+
"128012": {
|
| 101 |
+
"content": "<|reserved_special_token_4|>",
|
| 102 |
+
"lstrip": false,
|
| 103 |
+
"normalized": false,
|
| 104 |
+
"rstrip": false,
|
| 105 |
+
"single_word": false,
|
| 106 |
+
"special": true
|
| 107 |
+
},
|
| 108 |
+
"128013": {
|
| 109 |
+
"content": "<|reserved_special_token_5|>",
|
| 110 |
+
"lstrip": false,
|
| 111 |
+
"normalized": false,
|
| 112 |
+
"rstrip": false,
|
| 113 |
+
"single_word": false,
|
| 114 |
+
"special": true
|
| 115 |
+
},
|
| 116 |
+
"128014": {
|
| 117 |
+
"content": "<|reserved_special_token_6|>",
|
| 118 |
+
"lstrip": false,
|
| 119 |
+
"normalized": false,
|
| 120 |
+
"rstrip": false,
|
| 121 |
+
"single_word": false,
|
| 122 |
+
"special": true
|
| 123 |
+
},
|
| 124 |
+
"128015": {
|
| 125 |
+
"content": "<|reserved_special_token_7|>",
|
| 126 |
+
"lstrip": false,
|
| 127 |
+
"normalized": false,
|
| 128 |
+
"rstrip": false,
|
| 129 |
+
"single_word": false,
|
| 130 |
+
"special": true
|
| 131 |
+
},
|
| 132 |
+
"128016": {
|
| 133 |
+
"content": "<|reserved_special_token_8|>",
|
| 134 |
+
"lstrip": false,
|
| 135 |
+
"normalized": false,
|
| 136 |
+
"rstrip": false,
|
| 137 |
+
"single_word": false,
|
| 138 |
+
"special": true
|
| 139 |
+
},
|
| 140 |
+
"128017": {
|
| 141 |
+
"content": "<|reserved_special_token_9|>",
|
| 142 |
+
"lstrip": false,
|
| 143 |
+
"normalized": false,
|
| 144 |
+
"rstrip": false,
|
| 145 |
+
"single_word": false,
|
| 146 |
+
"special": true
|
| 147 |
+
},
|
| 148 |
+
"128018": {
|
| 149 |
+
"content": "<|reserved_special_token_10|>",
|
| 150 |
+
"lstrip": false,
|
| 151 |
+
"normalized": false,
|
| 152 |
+
"rstrip": false,
|
| 153 |
+
"single_word": false,
|
| 154 |
+
"special": true
|
| 155 |
+
},
|
| 156 |
+
"128019": {
|
| 157 |
+
"content": "<|reserved_special_token_11|>",
|
| 158 |
+
"lstrip": false,
|
| 159 |
+
"normalized": false,
|
| 160 |
+
"rstrip": false,
|
| 161 |
+
"single_word": false,
|
| 162 |
+
"special": true
|
| 163 |
+
},
|
| 164 |
+
"128020": {
|
| 165 |
+
"content": "<|reserved_special_token_12|>",
|
| 166 |
+
"lstrip": false,
|
| 167 |
+
"normalized": false,
|
| 168 |
+
"rstrip": false,
|
| 169 |
+
"single_word": false,
|
| 170 |
+
"special": true
|
| 171 |
+
},
|
| 172 |
+
"128021": {
|
| 173 |
+
"content": "<|reserved_special_token_13|>",
|
| 174 |
+
"lstrip": false,
|
| 175 |
+
"normalized": false,
|
| 176 |
+
"rstrip": false,
|
| 177 |
+
"single_word": false,
|
| 178 |
+
"special": true
|
| 179 |
+
},
|
| 180 |
+
"128022": {
|
| 181 |
+
"content": "<|reserved_special_token_14|>",
|
| 182 |
+
"lstrip": false,
|
| 183 |
+
"normalized": false,
|
| 184 |
+
"rstrip": false,
|
| 185 |
+
"single_word": false,
|
| 186 |
+
"special": true
|
| 187 |
+
},
|
| 188 |
+
"128023": {
|
| 189 |
+
"content": "<|reserved_special_token_15|>",
|
| 190 |
+
"lstrip": false,
|
| 191 |
+
"normalized": false,
|
| 192 |
+
"rstrip": false,
|
| 193 |
+
"single_word": false,
|
| 194 |
+
"special": true
|
| 195 |
+
},
|
| 196 |
+
"128024": {
|
| 197 |
+
"content": "<|reserved_special_token_16|>",
|
| 198 |
+
"lstrip": false,
|
| 199 |
+
"normalized": false,
|
| 200 |
+
"rstrip": false,
|
| 201 |
+
"single_word": false,
|
| 202 |
+
"special": true
|
| 203 |
+
},
|
| 204 |
+
"128025": {
|
| 205 |
+
"content": "<|reserved_special_token_17|>",
|
| 206 |
+
"lstrip": false,
|
| 207 |
+
"normalized": false,
|
| 208 |
+
"rstrip": false,
|
| 209 |
+
"single_word": false,
|
| 210 |
+
"special": true
|
| 211 |
+
},
|
| 212 |
+
"128026": {
|
| 213 |
+
"content": "<|reserved_special_token_18|>",
|
| 214 |
+
"lstrip": false,
|
| 215 |
+
"normalized": false,
|
| 216 |
+
"rstrip": false,
|
| 217 |
+
"single_word": false,
|
| 218 |
+
"special": true
|
| 219 |
+
},
|
| 220 |
+
"128027": {
|
| 221 |
+
"content": "<|reserved_special_token_19|>",
|
| 222 |
+
"lstrip": false,
|
| 223 |
+
"normalized": false,
|
| 224 |
+
"rstrip": false,
|
| 225 |
+
"single_word": false,
|
| 226 |
+
"special": true
|
| 227 |
+
},
|
| 228 |
+
"128028": {
|
| 229 |
+
"content": "<|reserved_special_token_20|>",
|
| 230 |
+
"lstrip": false,
|
| 231 |
+
"normalized": false,
|
| 232 |
+
"rstrip": false,
|
| 233 |
+
"single_word": false,
|
| 234 |
+
"special": true
|
| 235 |
+
},
|
| 236 |
+
"128029": {
|
| 237 |
+
"content": "<|reserved_special_token_21|>",
|
| 238 |
+
"lstrip": false,
|
| 239 |
+
"normalized": false,
|
| 240 |
+
"rstrip": false,
|
| 241 |
+
"single_word": false,
|
| 242 |
+
"special": true
|
| 243 |
+
},
|
| 244 |
+
"128030": {
|
| 245 |
+
"content": "<|reserved_special_token_22|>",
|
| 246 |
+
"lstrip": false,
|
| 247 |
+
"normalized": false,
|
| 248 |
+
"rstrip": false,
|
| 249 |
+
"single_word": false,
|
| 250 |
+
"special": true
|
| 251 |
+
},
|
| 252 |
+
"128031": {
|
| 253 |
+
"content": "<|reserved_special_token_23|>",
|
| 254 |
+
"lstrip": false,
|
| 255 |
+
"normalized": false,
|
| 256 |
+
"rstrip": false,
|
| 257 |
+
"single_word": false,
|
| 258 |
+
"special": true
|
| 259 |
+
},
|
| 260 |
+
"128032": {
|
| 261 |
+
"content": "<|reserved_special_token_24|>",
|
| 262 |
+
"lstrip": false,
|
| 263 |
+
"normalized": false,
|
| 264 |
+
"rstrip": false,
|
| 265 |
+
"single_word": false,
|
| 266 |
+
"special": true
|
| 267 |
+
},
|
| 268 |
+
"128033": {
|
| 269 |
+
"content": "<|reserved_special_token_25|>",
|
| 270 |
+
"lstrip": false,
|
| 271 |
+
"normalized": false,
|
| 272 |
+
"rstrip": false,
|
| 273 |
+
"single_word": false,
|
| 274 |
+
"special": true
|
| 275 |
+
},
|
| 276 |
+
"128034": {
|
| 277 |
+
"content": "<|reserved_special_token_26|>",
|
| 278 |
+
"lstrip": false,
|
| 279 |
+
"normalized": false,
|
| 280 |
+
"rstrip": false,
|
| 281 |
+
"single_word": false,
|
| 282 |
+
"special": true
|
| 283 |
+
},
|
| 284 |
+
"128035": {
|
| 285 |
+
"content": "<|reserved_special_token_27|>",
|
| 286 |
+
"lstrip": false,
|
| 287 |
+
"normalized": false,
|
| 288 |
+
"rstrip": false,
|
| 289 |
+
"single_word": false,
|
| 290 |
+
"special": true
|
| 291 |
+
},
|
| 292 |
+
"128036": {
|
| 293 |
+
"content": "<|reserved_special_token_28|>",
|
| 294 |
+
"lstrip": false,
|
| 295 |
+
"normalized": false,
|
| 296 |
+
"rstrip": false,
|
| 297 |
+
"single_word": false,
|
| 298 |
+
"special": true
|
| 299 |
+
},
|
| 300 |
+
"128037": {
|
| 301 |
+
"content": "<|reserved_special_token_29|>",
|
| 302 |
+
"lstrip": false,
|
| 303 |
+
"normalized": false,
|
| 304 |
+
"rstrip": false,
|
| 305 |
+
"single_word": false,
|
| 306 |
+
"special": true
|
| 307 |
+
},
|
| 308 |
+
"128038": {
|
| 309 |
+
"content": "<|reserved_special_token_30|>",
|
| 310 |
+
"lstrip": false,
|
| 311 |
+
"normalized": false,
|
| 312 |
+
"rstrip": false,
|
| 313 |
+
"single_word": false,
|
| 314 |
+
"special": true
|
| 315 |
+
},
|
| 316 |
+
"128039": {
|
| 317 |
+
"content": "<|reserved_special_token_31|>",
|
| 318 |
+
"lstrip": false,
|
| 319 |
+
"normalized": false,
|
| 320 |
+
"rstrip": false,
|
| 321 |
+
"single_word": false,
|
| 322 |
+
"special": true
|
| 323 |
+
},
|
| 324 |
+
"128040": {
|
| 325 |
+
"content": "<|reserved_special_token_32|>",
|
| 326 |
+
"lstrip": false,
|
| 327 |
+
"normalized": false,
|
| 328 |
+
"rstrip": false,
|
| 329 |
+
"single_word": false,
|
| 330 |
+
"special": true
|
| 331 |
+
},
|
| 332 |
+
"128041": {
|
| 333 |
+
"content": "<|reserved_special_token_33|>",
|
| 334 |
+
"lstrip": false,
|
| 335 |
+
"normalized": false,
|
| 336 |
+
"rstrip": false,
|
| 337 |
+
"single_word": false,
|
| 338 |
+
"special": true
|
| 339 |
+
},
|
| 340 |
+
"128042": {
|
| 341 |
+
"content": "<|reserved_special_token_34|>",
|
| 342 |
+
"lstrip": false,
|
| 343 |
+
"normalized": false,
|
| 344 |
+
"rstrip": false,
|
| 345 |
+
"single_word": false,
|
| 346 |
+
"special": true
|
| 347 |
+
},
|
| 348 |
+
"128043": {
|
| 349 |
+
"content": "<|reserved_special_token_35|>",
|
| 350 |
+
"lstrip": false,
|
| 351 |
+
"normalized": false,
|
| 352 |
+
"rstrip": false,
|
| 353 |
+
"single_word": false,
|
| 354 |
+
"special": true
|
| 355 |
+
},
|
| 356 |
+
"128044": {
|
| 357 |
+
"content": "<|reserved_special_token_36|>",
|
| 358 |
+
"lstrip": false,
|
| 359 |
+
"normalized": false,
|
| 360 |
+
"rstrip": false,
|
| 361 |
+
"single_word": false,
|
| 362 |
+
"special": true
|
| 363 |
+
},
|
| 364 |
+
"128045": {
|
| 365 |
+
"content": "<|reserved_special_token_37|>",
|
| 366 |
+
"lstrip": false,
|
| 367 |
+
"normalized": false,
|
| 368 |
+
"rstrip": false,
|
| 369 |
+
"single_word": false,
|
| 370 |
+
"special": true
|
| 371 |
+
},
|
| 372 |
+
"128046": {
|
| 373 |
+
"content": "<|reserved_special_token_38|>",
|
| 374 |
+
"lstrip": false,
|
| 375 |
+
"normalized": false,
|
| 376 |
+
"rstrip": false,
|
| 377 |
+
"single_word": false,
|
| 378 |
+
"special": true
|
| 379 |
+
},
|
| 380 |
+
"128047": {
|
| 381 |
+
"content": "<|reserved_special_token_39|>",
|
| 382 |
+
"lstrip": false,
|
| 383 |
+
"normalized": false,
|
| 384 |
+
"rstrip": false,
|
| 385 |
+
"single_word": false,
|
| 386 |
+
"special": true
|
| 387 |
+
},
|
| 388 |
+
"128048": {
|
| 389 |
+
"content": "<|reserved_special_token_40|>",
|
| 390 |
+
"lstrip": false,
|
| 391 |
+
"normalized": false,
|
| 392 |
+
"rstrip": false,
|
| 393 |
+
"single_word": false,
|
| 394 |
+
"special": true
|
| 395 |
+
},
|
| 396 |
+
"128049": {
|
| 397 |
+
"content": "<|reserved_special_token_41|>",
|
| 398 |
+
"lstrip": false,
|
| 399 |
+
"normalized": false,
|
| 400 |
+
"rstrip": false,
|
| 401 |
+
"single_word": false,
|
| 402 |
+
"special": true
|
| 403 |
+
},
|
| 404 |
+
"128050": {
|
| 405 |
+
"content": "<|reserved_special_token_42|>",
|
| 406 |
+
"lstrip": false,
|
| 407 |
+
"normalized": false,
|
| 408 |
+
"rstrip": false,
|
| 409 |
+
"single_word": false,
|
| 410 |
+
"special": true
|
| 411 |
+
},
|
| 412 |
+
"128051": {
|
| 413 |
+
"content": "<|reserved_special_token_43|>",
|
| 414 |
+
"lstrip": false,
|
| 415 |
+
"normalized": false,
|
| 416 |
+
"rstrip": false,
|
| 417 |
+
"single_word": false,
|
| 418 |
+
"special": true
|
| 419 |
+
},
|
| 420 |
+
"128052": {
|
| 421 |
+
"content": "<|reserved_special_token_44|>",
|
| 422 |
+
"lstrip": false,
|
| 423 |
+
"normalized": false,
|
| 424 |
+
"rstrip": false,
|
| 425 |
+
"single_word": false,
|
| 426 |
+
"special": true
|
| 427 |
+
},
|
| 428 |
+
"128053": {
|
| 429 |
+
"content": "<|reserved_special_token_45|>",
|
| 430 |
+
"lstrip": false,
|
| 431 |
+
"normalized": false,
|
| 432 |
+
"rstrip": false,
|
| 433 |
+
"single_word": false,
|
| 434 |
+
"special": true
|
| 435 |
+
},
|
| 436 |
+
"128054": {
|
| 437 |
+
"content": "<|reserved_special_token_46|>",
|
| 438 |
+
"lstrip": false,
|
| 439 |
+
"normalized": false,
|
| 440 |
+
"rstrip": false,
|
| 441 |
+
"single_word": false,
|
| 442 |
+
"special": true
|
| 443 |
+
},
|
| 444 |
+
"128055": {
|
| 445 |
+
"content": "<|reserved_special_token_47|>",
|
| 446 |
+
"lstrip": false,
|
| 447 |
+
"normalized": false,
|
| 448 |
+
"rstrip": false,
|
| 449 |
+
"single_word": false,
|
| 450 |
+
"special": true
|
| 451 |
+
},
|
| 452 |
+
"128056": {
|
| 453 |
+
"content": "<|reserved_special_token_48|>",
|
| 454 |
+
"lstrip": false,
|
| 455 |
+
"normalized": false,
|
| 456 |
+
"rstrip": false,
|
| 457 |
+
"single_word": false,
|
| 458 |
+
"special": true
|
| 459 |
+
},
|
| 460 |
+
"128057": {
|
| 461 |
+
"content": "<|reserved_special_token_49|>",
|
| 462 |
+
"lstrip": false,
|
| 463 |
+
"normalized": false,
|
| 464 |
+
"rstrip": false,
|
| 465 |
+
"single_word": false,
|
| 466 |
+
"special": true
|
| 467 |
+
},
|
| 468 |
+
"128058": {
|
| 469 |
+
"content": "<|reserved_special_token_50|>",
|
| 470 |
+
"lstrip": false,
|
| 471 |
+
"normalized": false,
|
| 472 |
+
"rstrip": false,
|
| 473 |
+
"single_word": false,
|
| 474 |
+
"special": true
|
| 475 |
+
},
|
| 476 |
+
"128059": {
|
| 477 |
+
"content": "<|reserved_special_token_51|>",
|
| 478 |
+
"lstrip": false,
|
| 479 |
+
"normalized": false,
|
| 480 |
+
"rstrip": false,
|
| 481 |
+
"single_word": false,
|
| 482 |
+
"special": true
|
| 483 |
+
},
|
| 484 |
+
"128060": {
|
| 485 |
+
"content": "<|reserved_special_token_52|>",
|
| 486 |
+
"lstrip": false,
|
| 487 |
+
"normalized": false,
|
| 488 |
+
"rstrip": false,
|
| 489 |
+
"single_word": false,
|
| 490 |
+
"special": true
|
| 491 |
+
},
|
| 492 |
+
"128061": {
|
| 493 |
+
"content": "<|reserved_special_token_53|>",
|
| 494 |
+
"lstrip": false,
|
| 495 |
+
"normalized": false,
|
| 496 |
+
"rstrip": false,
|
| 497 |
+
"single_word": false,
|
| 498 |
+
"special": true
|
| 499 |
+
},
|
| 500 |
+
"128062": {
|
| 501 |
+
"content": "<|reserved_special_token_54|>",
|
| 502 |
+
"lstrip": false,
|
| 503 |
+
"normalized": false,
|
| 504 |
+
"rstrip": false,
|
| 505 |
+
"single_word": false,
|
| 506 |
+
"special": true
|
| 507 |
+
},
|
| 508 |
+
"128063": {
|
| 509 |
+
"content": "<|reserved_special_token_55|>",
|
| 510 |
+
"lstrip": false,
|
| 511 |
+
"normalized": false,
|
| 512 |
+
"rstrip": false,
|
| 513 |
+
"single_word": false,
|
| 514 |
+
"special": true
|
| 515 |
+
},
|
| 516 |
+
"128064": {
|
| 517 |
+
"content": "<|reserved_special_token_56|>",
|
| 518 |
+
"lstrip": false,
|
| 519 |
+
"normalized": false,
|
| 520 |
+
"rstrip": false,
|
| 521 |
+
"single_word": false,
|
| 522 |
+
"special": true
|
| 523 |
+
},
|
| 524 |
+
"128065": {
|
| 525 |
+
"content": "<|reserved_special_token_57|>",
|
| 526 |
+
"lstrip": false,
|
| 527 |
+
"normalized": false,
|
| 528 |
+
"rstrip": false,
|
| 529 |
+
"single_word": false,
|
| 530 |
+
"special": true
|
| 531 |
+
},
|
| 532 |
+
"128066": {
|
| 533 |
+
"content": "<|reserved_special_token_58|>",
|
| 534 |
+
"lstrip": false,
|
| 535 |
+
"normalized": false,
|
| 536 |
+
"rstrip": false,
|
| 537 |
+
"single_word": false,
|
| 538 |
+
"special": true
|
| 539 |
+
},
|
| 540 |
+
"128067": {
|
| 541 |
+
"content": "<|reserved_special_token_59|>",
|
| 542 |
+
"lstrip": false,
|
| 543 |
+
"normalized": false,
|
| 544 |
+
"rstrip": false,
|
| 545 |
+
"single_word": false,
|
| 546 |
+
"special": true
|
| 547 |
+
},
|
| 548 |
+
"128068": {
|
| 549 |
+
"content": "<|reserved_special_token_60|>",
|
| 550 |
+
"lstrip": false,
|
| 551 |
+
"normalized": false,
|
| 552 |
+
"rstrip": false,
|
| 553 |
+
"single_word": false,
|
| 554 |
+
"special": true
|
| 555 |
+
},
|
| 556 |
+
"128069": {
|
| 557 |
+
"content": "<|reserved_special_token_61|>",
|
| 558 |
+
"lstrip": false,
|
| 559 |
+
"normalized": false,
|
| 560 |
+
"rstrip": false,
|
| 561 |
+
"single_word": false,
|
| 562 |
+
"special": true
|
| 563 |
+
},
|
| 564 |
+
"128070": {
|
| 565 |
+
"content": "<|reserved_special_token_62|>",
|
| 566 |
+
"lstrip": false,
|
| 567 |
+
"normalized": false,
|
| 568 |
+
"rstrip": false,
|
| 569 |
+
"single_word": false,
|
| 570 |
+
"special": true
|
| 571 |
+
},
|
| 572 |
+
"128071": {
|
| 573 |
+
"content": "<|reserved_special_token_63|>",
|
| 574 |
+
"lstrip": false,
|
| 575 |
+
"normalized": false,
|
| 576 |
+
"rstrip": false,
|
| 577 |
+
"single_word": false,
|
| 578 |
+
"special": true
|
| 579 |
+
},
|
| 580 |
+
"128072": {
|
| 581 |
+
"content": "<|reserved_special_token_64|>",
|
| 582 |
+
"lstrip": false,
|
| 583 |
+
"normalized": false,
|
| 584 |
+
"rstrip": false,
|
| 585 |
+
"single_word": false,
|
| 586 |
+
"special": true
|
| 587 |
+
},
|
| 588 |
+
"128073": {
|
| 589 |
+
"content": "<|reserved_special_token_65|>",
|
| 590 |
+
"lstrip": false,
|
| 591 |
+
"normalized": false,
|
| 592 |
+
"rstrip": false,
|
| 593 |
+
"single_word": false,
|
| 594 |
+
"special": true
|
| 595 |
+
},
|
| 596 |
+
"128074": {
|
| 597 |
+
"content": "<|reserved_special_token_66|>",
|
| 598 |
+
"lstrip": false,
|
| 599 |
+
"normalized": false,
|
| 600 |
+
"rstrip": false,
|
| 601 |
+
"single_word": false,
|
| 602 |
+
"special": true
|
| 603 |
+
},
|
| 604 |
+
"128075": {
|
| 605 |
+
"content": "<|reserved_special_token_67|>",
|
| 606 |
+
"lstrip": false,
|
| 607 |
+
"normalized": false,
|
| 608 |
+
"rstrip": false,
|
| 609 |
+
"single_word": false,
|
| 610 |
+
"special": true
|
| 611 |
+
},
|
| 612 |
+
"128076": {
|
| 613 |
+
"content": "<|reserved_special_token_68|>",
|
| 614 |
+
"lstrip": false,
|
| 615 |
+
"normalized": false,
|
| 616 |
+
"rstrip": false,
|
| 617 |
+
"single_word": false,
|
| 618 |
+
"special": true
|
| 619 |
+
},
|
| 620 |
+
"128077": {
|
| 621 |
+
"content": "<|reserved_special_token_69|>",
|
| 622 |
+
"lstrip": false,
|
| 623 |
+
"normalized": false,
|
| 624 |
+
"rstrip": false,
|
| 625 |
+
"single_word": false,
|
| 626 |
+
"special": true
|
| 627 |
+
},
|
| 628 |
+
"128078": {
|
| 629 |
+
"content": "<|reserved_special_token_70|>",
|
| 630 |
+
"lstrip": false,
|
| 631 |
+
"normalized": false,
|
| 632 |
+
"rstrip": false,
|
| 633 |
+
"single_word": false,
|
| 634 |
+
"special": true
|
| 635 |
+
},
|
| 636 |
+
"128079": {
|
| 637 |
+
"content": "<|reserved_special_token_71|>",
|
| 638 |
+
"lstrip": false,
|
| 639 |
+
"normalized": false,
|
| 640 |
+
"rstrip": false,
|
| 641 |
+
"single_word": false,
|
| 642 |
+
"special": true
|
| 643 |
+
},
|
| 644 |
+
"128080": {
|
| 645 |
+
"content": "<|reserved_special_token_72|>",
|
| 646 |
+
"lstrip": false,
|
| 647 |
+
"normalized": false,
|
| 648 |
+
"rstrip": false,
|
| 649 |
+
"single_word": false,
|
| 650 |
+
"special": true
|
| 651 |
+
},
|
| 652 |
+
"128081": {
|
| 653 |
+
"content": "<|reserved_special_token_73|>",
|
| 654 |
+
"lstrip": false,
|
| 655 |
+
"normalized": false,
|
| 656 |
+
"rstrip": false,
|
| 657 |
+
"single_word": false,
|
| 658 |
+
"special": true
|
| 659 |
+
},
|
| 660 |
+
"128082": {
|
| 661 |
+
"content": "<|reserved_special_token_74|>",
|
| 662 |
+
"lstrip": false,
|
| 663 |
+
"normalized": false,
|
| 664 |
+
"rstrip": false,
|
| 665 |
+
"single_word": false,
|
| 666 |
+
"special": true
|
| 667 |
+
},
|
| 668 |
+
"128083": {
|
| 669 |
+
"content": "<|reserved_special_token_75|>",
|
| 670 |
+
"lstrip": false,
|
| 671 |
+
"normalized": false,
|
| 672 |
+
"rstrip": false,
|
| 673 |
+
"single_word": false,
|
| 674 |
+
"special": true
|
| 675 |
+
},
|
| 676 |
+
"128084": {
|
| 677 |
+
"content": "<|reserved_special_token_76|>",
|
| 678 |
+
"lstrip": false,
|
| 679 |
+
"normalized": false,
|
| 680 |
+
"rstrip": false,
|
| 681 |
+
"single_word": false,
|
| 682 |
+
"special": true
|
| 683 |
+
},
|
| 684 |
+
"128085": {
|
| 685 |
+
"content": "<|reserved_special_token_77|>",
|
| 686 |
+
"lstrip": false,
|
| 687 |
+
"normalized": false,
|
| 688 |
+
"rstrip": false,
|
| 689 |
+
"single_word": false,
|
| 690 |
+
"special": true
|
| 691 |
+
},
|
| 692 |
+
"128086": {
|
| 693 |
+
"content": "<|reserved_special_token_78|>",
|
| 694 |
+
"lstrip": false,
|
| 695 |
+
"normalized": false,
|
| 696 |
+
"rstrip": false,
|
| 697 |
+
"single_word": false,
|
| 698 |
+
"special": true
|
| 699 |
+
},
|
| 700 |
+
"128087": {
|
| 701 |
+
"content": "<|reserved_special_token_79|>",
|
| 702 |
+
"lstrip": false,
|
| 703 |
+
"normalized": false,
|
| 704 |
+
"rstrip": false,
|
| 705 |
+
"single_word": false,
|
| 706 |
+
"special": true
|
| 707 |
+
},
|
| 708 |
+
"128088": {
|
| 709 |
+
"content": "<|reserved_special_token_80|>",
|
| 710 |
+
"lstrip": false,
|
| 711 |
+
"normalized": false,
|
| 712 |
+
"rstrip": false,
|
| 713 |
+
"single_word": false,
|
| 714 |
+
"special": true
|
| 715 |
+
},
|
| 716 |
+
"128089": {
|
| 717 |
+
"content": "<|reserved_special_token_81|>",
|
| 718 |
+
"lstrip": false,
|
| 719 |
+
"normalized": false,
|
| 720 |
+
"rstrip": false,
|
| 721 |
+
"single_word": false,
|
| 722 |
+
"special": true
|
| 723 |
+
},
|
| 724 |
+
"128090": {
|
| 725 |
+
"content": "<|reserved_special_token_82|>",
|
| 726 |
+
"lstrip": false,
|
| 727 |
+
"normalized": false,
|
| 728 |
+
"rstrip": false,
|
| 729 |
+
"single_word": false,
|
| 730 |
+
"special": true
|
| 731 |
+
},
|
| 732 |
+
"128091": {
|
| 733 |
+
"content": "<|reserved_special_token_83|>",
|
| 734 |
+
"lstrip": false,
|
| 735 |
+
"normalized": false,
|
| 736 |
+
"rstrip": false,
|
| 737 |
+
"single_word": false,
|
| 738 |
+
"special": true
|
| 739 |
+
},
|
| 740 |
+
"128092": {
|
| 741 |
+
"content": "<|reserved_special_token_84|>",
|
| 742 |
+
"lstrip": false,
|
| 743 |
+
"normalized": false,
|
| 744 |
+
"rstrip": false,
|
| 745 |
+
"single_word": false,
|
| 746 |
+
"special": true
|
| 747 |
+
},
|
| 748 |
+
"128093": {
|
| 749 |
+
"content": "<|reserved_special_token_85|>",
|
| 750 |
+
"lstrip": false,
|
| 751 |
+
"normalized": false,
|
| 752 |
+
"rstrip": false,
|
| 753 |
+
"single_word": false,
|
| 754 |
+
"special": true
|
| 755 |
+
},
|
| 756 |
+
"128094": {
|
| 757 |
+
"content": "<|reserved_special_token_86|>",
|
| 758 |
+
"lstrip": false,
|
| 759 |
+
"normalized": false,
|
| 760 |
+
"rstrip": false,
|
| 761 |
+
"single_word": false,
|
| 762 |
+
"special": true
|
| 763 |
+
},
|
| 764 |
+
"128095": {
|
| 765 |
+
"content": "<|reserved_special_token_87|>",
|
| 766 |
+
"lstrip": false,
|
| 767 |
+
"normalized": false,
|
| 768 |
+
"rstrip": false,
|
| 769 |
+
"single_word": false,
|
| 770 |
+
"special": true
|
| 771 |
+
},
|
| 772 |
+
"128096": {
|
| 773 |
+
"content": "<|reserved_special_token_88|>",
|
| 774 |
+
"lstrip": false,
|
| 775 |
+
"normalized": false,
|
| 776 |
+
"rstrip": false,
|
| 777 |
+
"single_word": false,
|
| 778 |
+
"special": true
|
| 779 |
+
},
|
| 780 |
+
"128097": {
|
| 781 |
+
"content": "<|reserved_special_token_89|>",
|
| 782 |
+
"lstrip": false,
|
| 783 |
+
"normalized": false,
|
| 784 |
+
"rstrip": false,
|
| 785 |
+
"single_word": false,
|
| 786 |
+
"special": true
|
| 787 |
+
},
|
| 788 |
+
"128098": {
|
| 789 |
+
"content": "<|reserved_special_token_90|>",
|
| 790 |
+
"lstrip": false,
|
| 791 |
+
"normalized": false,
|
| 792 |
+
"rstrip": false,
|
| 793 |
+
"single_word": false,
|
| 794 |
+
"special": true
|
| 795 |
+
},
|
| 796 |
+
"128099": {
|
| 797 |
+
"content": "<|reserved_special_token_91|>",
|
| 798 |
+
"lstrip": false,
|
| 799 |
+
"normalized": false,
|
| 800 |
+
"rstrip": false,
|
| 801 |
+
"single_word": false,
|
| 802 |
+
"special": true
|
| 803 |
+
},
|
| 804 |
+
"128100": {
|
| 805 |
+
"content": "<|reserved_special_token_92|>",
|
| 806 |
+
"lstrip": false,
|
| 807 |
+
"normalized": false,
|
| 808 |
+
"rstrip": false,
|
| 809 |
+
"single_word": false,
|
| 810 |
+
"special": true
|
| 811 |
+
},
|
| 812 |
+
"128101": {
|
| 813 |
+
"content": "<|reserved_special_token_93|>",
|
| 814 |
+
"lstrip": false,
|
| 815 |
+
"normalized": false,
|
| 816 |
+
"rstrip": false,
|
| 817 |
+
"single_word": false,
|
| 818 |
+
"special": true
|
| 819 |
+
},
|
| 820 |
+
"128102": {
|
| 821 |
+
"content": "<|reserved_special_token_94|>",
|
| 822 |
+
"lstrip": false,
|
| 823 |
+
"normalized": false,
|
| 824 |
+
"rstrip": false,
|
| 825 |
+
"single_word": false,
|
| 826 |
+
"special": true
|
| 827 |
+
},
|
| 828 |
+
"128103": {
|
| 829 |
+
"content": "<|reserved_special_token_95|>",
|
| 830 |
+
"lstrip": false,
|
| 831 |
+
"normalized": false,
|
| 832 |
+
"rstrip": false,
|
| 833 |
+
"single_word": false,
|
| 834 |
+
"special": true
|
| 835 |
+
},
|
| 836 |
+
"128104": {
|
| 837 |
+
"content": "<|reserved_special_token_96|>",
|
| 838 |
+
"lstrip": false,
|
| 839 |
+
"normalized": false,
|
| 840 |
+
"rstrip": false,
|
| 841 |
+
"single_word": false,
|
| 842 |
+
"special": true
|
| 843 |
+
},
|
| 844 |
+
"128105": {
|
| 845 |
+
"content": "<|reserved_special_token_97|>",
|
| 846 |
+
"lstrip": false,
|
| 847 |
+
"normalized": false,
|
| 848 |
+
"rstrip": false,
|
| 849 |
+
"single_word": false,
|
| 850 |
+
"special": true
|
| 851 |
+
},
|
| 852 |
+
"128106": {
|
| 853 |
+
"content": "<|reserved_special_token_98|>",
|
| 854 |
+
"lstrip": false,
|
| 855 |
+
"normalized": false,
|
| 856 |
+
"rstrip": false,
|
| 857 |
+
"single_word": false,
|
| 858 |
+
"special": true
|
| 859 |
+
},
|
| 860 |
+
"128107": {
|
| 861 |
+
"content": "<|reserved_special_token_99|>",
|
| 862 |
+
"lstrip": false,
|
| 863 |
+
"normalized": false,
|
| 864 |
+
"rstrip": false,
|
| 865 |
+
"single_word": false,
|
| 866 |
+
"special": true
|
| 867 |
+
},
|
| 868 |
+
"128108": {
|
| 869 |
+
"content": "<|reserved_special_token_100|>",
|
| 870 |
+
"lstrip": false,
|
| 871 |
+
"normalized": false,
|
| 872 |
+
"rstrip": false,
|
| 873 |
+
"single_word": false,
|
| 874 |
+
"special": true
|
| 875 |
+
},
|
| 876 |
+
"128109": {
|
| 877 |
+
"content": "<|reserved_special_token_101|>",
|
| 878 |
+
"lstrip": false,
|
| 879 |
+
"normalized": false,
|
| 880 |
+
"rstrip": false,
|
| 881 |
+
"single_word": false,
|
| 882 |
+
"special": true
|
| 883 |
+
},
|
| 884 |
+
"128110": {
|
| 885 |
+
"content": "<|reserved_special_token_102|>",
|
| 886 |
+
"lstrip": false,
|
| 887 |
+
"normalized": false,
|
| 888 |
+
"rstrip": false,
|
| 889 |
+
"single_word": false,
|
| 890 |
+
"special": true
|
| 891 |
+
},
|
| 892 |
+
"128111": {
|
| 893 |
+
"content": "<|reserved_special_token_103|>",
|
| 894 |
+
"lstrip": false,
|
| 895 |
+
"normalized": false,
|
| 896 |
+
"rstrip": false,
|
| 897 |
+
"single_word": false,
|
| 898 |
+
"special": true
|
| 899 |
+
},
|
| 900 |
+
"128112": {
|
| 901 |
+
"content": "<|reserved_special_token_104|>",
|
| 902 |
+
"lstrip": false,
|
| 903 |
+
"normalized": false,
|
| 904 |
+
"rstrip": false,
|
| 905 |
+
"single_word": false,
|
| 906 |
+
"special": true
|
| 907 |
+
},
|
| 908 |
+
"128113": {
|
| 909 |
+
"content": "<|reserved_special_token_105|>",
|
| 910 |
+
"lstrip": false,
|
| 911 |
+
"normalized": false,
|
| 912 |
+
"rstrip": false,
|
| 913 |
+
"single_word": false,
|
| 914 |
+
"special": true
|
| 915 |
+
},
|
| 916 |
+
"128114": {
|
| 917 |
+
"content": "<|reserved_special_token_106|>",
|
| 918 |
+
"lstrip": false,
|
| 919 |
+
"normalized": false,
|
| 920 |
+
"rstrip": false,
|
| 921 |
+
"single_word": false,
|
| 922 |
+
"special": true
|
| 923 |
+
},
|
| 924 |
+
"128115": {
|
| 925 |
+
"content": "<|reserved_special_token_107|>",
|
| 926 |
+
"lstrip": false,
|
| 927 |
+
"normalized": false,
|
| 928 |
+
"rstrip": false,
|
| 929 |
+
"single_word": false,
|
| 930 |
+
"special": true
|
| 931 |
+
},
|
| 932 |
+
"128116": {
|
| 933 |
+
"content": "<|reserved_special_token_108|>",
|
| 934 |
+
"lstrip": false,
|
| 935 |
+
"normalized": false,
|
| 936 |
+
"rstrip": false,
|
| 937 |
+
"single_word": false,
|
| 938 |
+
"special": true
|
| 939 |
+
},
|
| 940 |
+
"128117": {
|
| 941 |
+
"content": "<|reserved_special_token_109|>",
|
| 942 |
+
"lstrip": false,
|
| 943 |
+
"normalized": false,
|
| 944 |
+
"rstrip": false,
|
| 945 |
+
"single_word": false,
|
| 946 |
+
"special": true
|
| 947 |
+
},
|
| 948 |
+
"128118": {
|
| 949 |
+
"content": "<|reserved_special_token_110|>",
|
| 950 |
+
"lstrip": false,
|
| 951 |
+
"normalized": false,
|
| 952 |
+
"rstrip": false,
|
| 953 |
+
"single_word": false,
|
| 954 |
+
"special": true
|
| 955 |
+
},
|
| 956 |
+
"128119": {
|
| 957 |
+
"content": "<|reserved_special_token_111|>",
|
| 958 |
+
"lstrip": false,
|
| 959 |
+
"normalized": false,
|
| 960 |
+
"rstrip": false,
|
| 961 |
+
"single_word": false,
|
| 962 |
+
"special": true
|
| 963 |
+
},
|
| 964 |
+
"128120": {
|
| 965 |
+
"content": "<|reserved_special_token_112|>",
|
| 966 |
+
"lstrip": false,
|
| 967 |
+
"normalized": false,
|
| 968 |
+
"rstrip": false,
|
| 969 |
+
"single_word": false,
|
| 970 |
+
"special": true
|
| 971 |
+
},
|
| 972 |
+
"128121": {
|
| 973 |
+
"content": "<|reserved_special_token_113|>",
|
| 974 |
+
"lstrip": false,
|
| 975 |
+
"normalized": false,
|
| 976 |
+
"rstrip": false,
|
| 977 |
+
"single_word": false,
|
| 978 |
+
"special": true
|
| 979 |
+
},
|
| 980 |
+
"128122": {
|
| 981 |
+
"content": "<|reserved_special_token_114|>",
|
| 982 |
+
"lstrip": false,
|
| 983 |
+
"normalized": false,
|
| 984 |
+
"rstrip": false,
|
| 985 |
+
"single_word": false,
|
| 986 |
+
"special": true
|
| 987 |
+
},
|
| 988 |
+
"128123": {
|
| 989 |
+
"content": "<|reserved_special_token_115|>",
|
| 990 |
+
"lstrip": false,
|
| 991 |
+
"normalized": false,
|
| 992 |
+
"rstrip": false,
|
| 993 |
+
"single_word": false,
|
| 994 |
+
"special": true
|
| 995 |
+
},
|
| 996 |
+
"128124": {
|
| 997 |
+
"content": "<|reserved_special_token_116|>",
|
| 998 |
+
"lstrip": false,
|
| 999 |
+
"normalized": false,
|
| 1000 |
+
"rstrip": false,
|
| 1001 |
+
"single_word": false,
|
| 1002 |
+
"special": true
|
| 1003 |
+
},
|
| 1004 |
+
"128125": {
|
| 1005 |
+
"content": "<|reserved_special_token_117|>",
|
| 1006 |
+
"lstrip": false,
|
| 1007 |
+
"normalized": false,
|
| 1008 |
+
"rstrip": false,
|
| 1009 |
+
"single_word": false,
|
| 1010 |
+
"special": true
|
| 1011 |
+
},
|
| 1012 |
+
"128126": {
|
| 1013 |
+
"content": "<|reserved_special_token_118|>",
|
| 1014 |
+
"lstrip": false,
|
| 1015 |
+
"normalized": false,
|
| 1016 |
+
"rstrip": false,
|
| 1017 |
+
"single_word": false,
|
| 1018 |
+
"special": true
|
| 1019 |
+
},
|
| 1020 |
+
"128127": {
|
| 1021 |
+
"content": "<|reserved_special_token_119|>",
|
| 1022 |
+
"lstrip": false,
|
| 1023 |
+
"normalized": false,
|
| 1024 |
+
"rstrip": false,
|
| 1025 |
+
"single_word": false,
|
| 1026 |
+
"special": true
|
| 1027 |
+
},
|
| 1028 |
+
"128128": {
|
| 1029 |
+
"content": "<|reserved_special_token_120|>",
|
| 1030 |
+
"lstrip": false,
|
| 1031 |
+
"normalized": false,
|
| 1032 |
+
"rstrip": false,
|
| 1033 |
+
"single_word": false,
|
| 1034 |
+
"special": true
|
| 1035 |
+
},
|
| 1036 |
+
"128129": {
|
| 1037 |
+
"content": "<|reserved_special_token_121|>",
|
| 1038 |
+
"lstrip": false,
|
| 1039 |
+
"normalized": false,
|
| 1040 |
+
"rstrip": false,
|
| 1041 |
+
"single_word": false,
|
| 1042 |
+
"special": true
|
| 1043 |
+
},
|
| 1044 |
+
"128130": {
|
| 1045 |
+
"content": "<|reserved_special_token_122|>",
|
| 1046 |
+
"lstrip": false,
|
| 1047 |
+
"normalized": false,
|
| 1048 |
+
"rstrip": false,
|
| 1049 |
+
"single_word": false,
|
| 1050 |
+
"special": true
|
| 1051 |
+
},
|
| 1052 |
+
"128131": {
|
| 1053 |
+
"content": "<|reserved_special_token_123|>",
|
| 1054 |
+
"lstrip": false,
|
| 1055 |
+
"normalized": false,
|
| 1056 |
+
"rstrip": false,
|
| 1057 |
+
"single_word": false,
|
| 1058 |
+
"special": true
|
| 1059 |
+
},
|
| 1060 |
+
"128132": {
|
| 1061 |
+
"content": "<|reserved_special_token_124|>",
|
| 1062 |
+
"lstrip": false,
|
| 1063 |
+
"normalized": false,
|
| 1064 |
+
"rstrip": false,
|
| 1065 |
+
"single_word": false,
|
| 1066 |
+
"special": true
|
| 1067 |
+
},
|
| 1068 |
+
"128133": {
|
| 1069 |
+
"content": "<|reserved_special_token_125|>",
|
| 1070 |
+
"lstrip": false,
|
| 1071 |
+
"normalized": false,
|
| 1072 |
+
"rstrip": false,
|
| 1073 |
+
"single_word": false,
|
| 1074 |
+
"special": true
|
| 1075 |
+
},
|
| 1076 |
+
"128134": {
|
| 1077 |
+
"content": "<|reserved_special_token_126|>",
|
| 1078 |
+
"lstrip": false,
|
| 1079 |
+
"normalized": false,
|
| 1080 |
+
"rstrip": false,
|
| 1081 |
+
"single_word": false,
|
| 1082 |
+
"special": true
|
| 1083 |
+
},
|
| 1084 |
+
"128135": {
|
| 1085 |
+
"content": "<|reserved_special_token_127|>",
|
| 1086 |
+
"lstrip": false,
|
| 1087 |
+
"normalized": false,
|
| 1088 |
+
"rstrip": false,
|
| 1089 |
+
"single_word": false,
|
| 1090 |
+
"special": true
|
| 1091 |
+
},
|
| 1092 |
+
"128136": {
|
| 1093 |
+
"content": "<|reserved_special_token_128|>",
|
| 1094 |
+
"lstrip": false,
|
| 1095 |
+
"normalized": false,
|
| 1096 |
+
"rstrip": false,
|
| 1097 |
+
"single_word": false,
|
| 1098 |
+
"special": true
|
| 1099 |
+
},
|
| 1100 |
+
"128137": {
|
| 1101 |
+
"content": "<|reserved_special_token_129|>",
|
| 1102 |
+
"lstrip": false,
|
| 1103 |
+
"normalized": false,
|
| 1104 |
+
"rstrip": false,
|
| 1105 |
+
"single_word": false,
|
| 1106 |
+
"special": true
|
| 1107 |
+
},
|
| 1108 |
+
"128138": {
|
| 1109 |
+
"content": "<|reserved_special_token_130|>",
|
| 1110 |
+
"lstrip": false,
|
| 1111 |
+
"normalized": false,
|
| 1112 |
+
"rstrip": false,
|
| 1113 |
+
"single_word": false,
|
| 1114 |
+
"special": true
|
| 1115 |
+
},
|
| 1116 |
+
"128139": {
|
| 1117 |
+
"content": "<|reserved_special_token_131|>",
|
| 1118 |
+
"lstrip": false,
|
| 1119 |
+
"normalized": false,
|
| 1120 |
+
"rstrip": false,
|
| 1121 |
+
"single_word": false,
|
| 1122 |
+
"special": true
|
| 1123 |
+
},
|
| 1124 |
+
"128140": {
|
| 1125 |
+
"content": "<|reserved_special_token_132|>",
|
| 1126 |
+
"lstrip": false,
|
| 1127 |
+
"normalized": false,
|
| 1128 |
+
"rstrip": false,
|
| 1129 |
+
"single_word": false,
|
| 1130 |
+
"special": true
|
| 1131 |
+
},
|
| 1132 |
+
"128141": {
|
| 1133 |
+
"content": "<|reserved_special_token_133|>",
|
| 1134 |
+
"lstrip": false,
|
| 1135 |
+
"normalized": false,
|
| 1136 |
+
"rstrip": false,
|
| 1137 |
+
"single_word": false,
|
| 1138 |
+
"special": true
|
| 1139 |
+
},
|
| 1140 |
+
"128142": {
|
| 1141 |
+
"content": "<|reserved_special_token_134|>",
|
| 1142 |
+
"lstrip": false,
|
| 1143 |
+
"normalized": false,
|
| 1144 |
+
"rstrip": false,
|
| 1145 |
+
"single_word": false,
|
| 1146 |
+
"special": true
|
| 1147 |
+
},
|
| 1148 |
+
"128143": {
|
| 1149 |
+
"content": "<|reserved_special_token_135|>",
|
| 1150 |
+
"lstrip": false,
|
| 1151 |
+
"normalized": false,
|
| 1152 |
+
"rstrip": false,
|
| 1153 |
+
"single_word": false,
|
| 1154 |
+
"special": true
|
| 1155 |
+
},
|
| 1156 |
+
"128144": {
|
| 1157 |
+
"content": "<|reserved_special_token_136|>",
|
| 1158 |
+
"lstrip": false,
|
| 1159 |
+
"normalized": false,
|
| 1160 |
+
"rstrip": false,
|
| 1161 |
+
"single_word": false,
|
| 1162 |
+
"special": true
|
| 1163 |
+
},
|
| 1164 |
+
"128145": {
|
| 1165 |
+
"content": "<|reserved_special_token_137|>",
|
| 1166 |
+
"lstrip": false,
|
| 1167 |
+
"normalized": false,
|
| 1168 |
+
"rstrip": false,
|
| 1169 |
+
"single_word": false,
|
| 1170 |
+
"special": true
|
| 1171 |
+
},
|
| 1172 |
+
"128146": {
|
| 1173 |
+
"content": "<|reserved_special_token_138|>",
|
| 1174 |
+
"lstrip": false,
|
| 1175 |
+
"normalized": false,
|
| 1176 |
+
"rstrip": false,
|
| 1177 |
+
"single_word": false,
|
| 1178 |
+
"special": true
|
| 1179 |
+
},
|
| 1180 |
+
"128147": {
|
| 1181 |
+
"content": "<|reserved_special_token_139|>",
|
| 1182 |
+
"lstrip": false,
|
| 1183 |
+
"normalized": false,
|
| 1184 |
+
"rstrip": false,
|
| 1185 |
+
"single_word": false,
|
| 1186 |
+
"special": true
|
| 1187 |
+
},
|
| 1188 |
+
"128148": {
|
| 1189 |
+
"content": "<|reserved_special_token_140|>",
|
| 1190 |
+
"lstrip": false,
|
| 1191 |
+
"normalized": false,
|
| 1192 |
+
"rstrip": false,
|
| 1193 |
+
"single_word": false,
|
| 1194 |
+
"special": true
|
| 1195 |
+
},
|
| 1196 |
+
"128149": {
|
| 1197 |
+
"content": "<|reserved_special_token_141|>",
|
| 1198 |
+
"lstrip": false,
|
| 1199 |
+
"normalized": false,
|
| 1200 |
+
"rstrip": false,
|
| 1201 |
+
"single_word": false,
|
| 1202 |
+
"special": true
|
| 1203 |
+
},
|
| 1204 |
+
"128150": {
|
| 1205 |
+
"content": "<|reserved_special_token_142|>",
|
| 1206 |
+
"lstrip": false,
|
| 1207 |
+
"normalized": false,
|
| 1208 |
+
"rstrip": false,
|
| 1209 |
+
"single_word": false,
|
| 1210 |
+
"special": true
|
| 1211 |
+
},
|
| 1212 |
+
"128151": {
|
| 1213 |
+
"content": "<|reserved_special_token_143|>",
|
| 1214 |
+
"lstrip": false,
|
| 1215 |
+
"normalized": false,
|
| 1216 |
+
"rstrip": false,
|
| 1217 |
+
"single_word": false,
|
| 1218 |
+
"special": true
|
| 1219 |
+
},
|
| 1220 |
+
"128152": {
|
| 1221 |
+
"content": "<|reserved_special_token_144|>",
|
| 1222 |
+
"lstrip": false,
|
| 1223 |
+
"normalized": false,
|
| 1224 |
+
"rstrip": false,
|
| 1225 |
+
"single_word": false,
|
| 1226 |
+
"special": true
|
| 1227 |
+
},
|
| 1228 |
+
"128153": {
|
| 1229 |
+
"content": "<|reserved_special_token_145|>",
|
| 1230 |
+
"lstrip": false,
|
| 1231 |
+
"normalized": false,
|
| 1232 |
+
"rstrip": false,
|
| 1233 |
+
"single_word": false,
|
| 1234 |
+
"special": true
|
| 1235 |
+
},
|
| 1236 |
+
"128154": {
|
| 1237 |
+
"content": "<|reserved_special_token_146|>",
|
| 1238 |
+
"lstrip": false,
|
| 1239 |
+
"normalized": false,
|
| 1240 |
+
"rstrip": false,
|
| 1241 |
+
"single_word": false,
|
| 1242 |
+
"special": true
|
| 1243 |
+
},
|
| 1244 |
+
"128155": {
|
| 1245 |
+
"content": "<|reserved_special_token_147|>",
|
| 1246 |
+
"lstrip": false,
|
| 1247 |
+
"normalized": false,
|
| 1248 |
+
"rstrip": false,
|
| 1249 |
+
"single_word": false,
|
| 1250 |
+
"special": true
|
| 1251 |
+
},
|
| 1252 |
+
"128156": {
|
| 1253 |
+
"content": "<|reserved_special_token_148|>",
|
| 1254 |
+
"lstrip": false,
|
| 1255 |
+
"normalized": false,
|
| 1256 |
+
"rstrip": false,
|
| 1257 |
+
"single_word": false,
|
| 1258 |
+
"special": true
|
| 1259 |
+
},
|
| 1260 |
+
"128157": {
|
| 1261 |
+
"content": "<|reserved_special_token_149|>",
|
| 1262 |
+
"lstrip": false,
|
| 1263 |
+
"normalized": false,
|
| 1264 |
+
"rstrip": false,
|
| 1265 |
+
"single_word": false,
|
| 1266 |
+
"special": true
|
| 1267 |
+
},
|
| 1268 |
+
"128158": {
|
| 1269 |
+
"content": "<|reserved_special_token_150|>",
|
| 1270 |
+
"lstrip": false,
|
| 1271 |
+
"normalized": false,
|
| 1272 |
+
"rstrip": false,
|
| 1273 |
+
"single_word": false,
|
| 1274 |
+
"special": true
|
| 1275 |
+
},
|
| 1276 |
+
"128159": {
|
| 1277 |
+
"content": "<|reserved_special_token_151|>",
|
| 1278 |
+
"lstrip": false,
|
| 1279 |
+
"normalized": false,
|
| 1280 |
+
"rstrip": false,
|
| 1281 |
+
"single_word": false,
|
| 1282 |
+
"special": true
|
| 1283 |
+
},
|
| 1284 |
+
"128160": {
|
| 1285 |
+
"content": "<|reserved_special_token_152|>",
|
| 1286 |
+
"lstrip": false,
|
| 1287 |
+
"normalized": false,
|
| 1288 |
+
"rstrip": false,
|
| 1289 |
+
"single_word": false,
|
| 1290 |
+
"special": true
|
| 1291 |
+
},
|
| 1292 |
+
"128161": {
|
| 1293 |
+
"content": "<|reserved_special_token_153|>",
|
| 1294 |
+
"lstrip": false,
|
| 1295 |
+
"normalized": false,
|
| 1296 |
+
"rstrip": false,
|
| 1297 |
+
"single_word": false,
|
| 1298 |
+
"special": true
|
| 1299 |
+
},
|
| 1300 |
+
"128162": {
|
| 1301 |
+
"content": "<|reserved_special_token_154|>",
|
| 1302 |
+
"lstrip": false,
|
| 1303 |
+
"normalized": false,
|
| 1304 |
+
"rstrip": false,
|
| 1305 |
+
"single_word": false,
|
| 1306 |
+
"special": true
|
| 1307 |
+
},
|
| 1308 |
+
"128163": {
|
| 1309 |
+
"content": "<|reserved_special_token_155|>",
|
| 1310 |
+
"lstrip": false,
|
| 1311 |
+
"normalized": false,
|
| 1312 |
+
"rstrip": false,
|
| 1313 |
+
"single_word": false,
|
| 1314 |
+
"special": true
|
| 1315 |
+
},
|
| 1316 |
+
"128164": {
|
| 1317 |
+
"content": "<|reserved_special_token_156|>",
|
| 1318 |
+
"lstrip": false,
|
| 1319 |
+
"normalized": false,
|
| 1320 |
+
"rstrip": false,
|
| 1321 |
+
"single_word": false,
|
| 1322 |
+
"special": true
|
| 1323 |
+
},
|
| 1324 |
+
"128165": {
|
| 1325 |
+
"content": "<|reserved_special_token_157|>",
|
| 1326 |
+
"lstrip": false,
|
| 1327 |
+
"normalized": false,
|
| 1328 |
+
"rstrip": false,
|
| 1329 |
+
"single_word": false,
|
| 1330 |
+
"special": true
|
| 1331 |
+
},
|
| 1332 |
+
"128166": {
|
| 1333 |
+
"content": "<|reserved_special_token_158|>",
|
| 1334 |
+
"lstrip": false,
|
| 1335 |
+
"normalized": false,
|
| 1336 |
+
"rstrip": false,
|
| 1337 |
+
"single_word": false,
|
| 1338 |
+
"special": true
|
| 1339 |
+
},
|
| 1340 |
+
"128167": {
|
| 1341 |
+
"content": "<|reserved_special_token_159|>",
|
| 1342 |
+
"lstrip": false,
|
| 1343 |
+
"normalized": false,
|
| 1344 |
+
"rstrip": false,
|
| 1345 |
+
"single_word": false,
|
| 1346 |
+
"special": true
|
| 1347 |
+
},
|
| 1348 |
+
"128168": {
|
| 1349 |
+
"content": "<|reserved_special_token_160|>",
|
| 1350 |
+
"lstrip": false,
|
| 1351 |
+
"normalized": false,
|
| 1352 |
+
"rstrip": false,
|
| 1353 |
+
"single_word": false,
|
| 1354 |
+
"special": true
|
| 1355 |
+
},
|
| 1356 |
+
"128169": {
|
| 1357 |
+
"content": "<|reserved_special_token_161|>",
|
| 1358 |
+
"lstrip": false,
|
| 1359 |
+
"normalized": false,
|
| 1360 |
+
"rstrip": false,
|
| 1361 |
+
"single_word": false,
|
| 1362 |
+
"special": true
|
| 1363 |
+
},
|
| 1364 |
+
"128170": {
|
| 1365 |
+
"content": "<|reserved_special_token_162|>",
|
| 1366 |
+
"lstrip": false,
|
| 1367 |
+
"normalized": false,
|
| 1368 |
+
"rstrip": false,
|
| 1369 |
+
"single_word": false,
|
| 1370 |
+
"special": true
|
| 1371 |
+
},
|
| 1372 |
+
"128171": {
|
| 1373 |
+
"content": "<|reserved_special_token_163|>",
|
| 1374 |
+
"lstrip": false,
|
| 1375 |
+
"normalized": false,
|
| 1376 |
+
"rstrip": false,
|
| 1377 |
+
"single_word": false,
|
| 1378 |
+
"special": true
|
| 1379 |
+
},
|
| 1380 |
+
"128172": {
|
| 1381 |
+
"content": "<|reserved_special_token_164|>",
|
| 1382 |
+
"lstrip": false,
|
| 1383 |
+
"normalized": false,
|
| 1384 |
+
"rstrip": false,
|
| 1385 |
+
"single_word": false,
|
| 1386 |
+
"special": true
|
| 1387 |
+
},
|
| 1388 |
+
"128173": {
|
| 1389 |
+
"content": "<|reserved_special_token_165|>",
|
| 1390 |
+
"lstrip": false,
|
| 1391 |
+
"normalized": false,
|
| 1392 |
+
"rstrip": false,
|
| 1393 |
+
"single_word": false,
|
| 1394 |
+
"special": true
|
| 1395 |
+
},
|
| 1396 |
+
"128174": {
|
| 1397 |
+
"content": "<|reserved_special_token_166|>",
|
| 1398 |
+
"lstrip": false,
|
| 1399 |
+
"normalized": false,
|
| 1400 |
+
"rstrip": false,
|
| 1401 |
+
"single_word": false,
|
| 1402 |
+
"special": true
|
| 1403 |
+
},
|
| 1404 |
+
"128175": {
|
| 1405 |
+
"content": "<|reserved_special_token_167|>",
|
| 1406 |
+
"lstrip": false,
|
| 1407 |
+
"normalized": false,
|
| 1408 |
+
"rstrip": false,
|
| 1409 |
+
"single_word": false,
|
| 1410 |
+
"special": true
|
| 1411 |
+
},
|
| 1412 |
+
"128176": {
|
| 1413 |
+
"content": "<|reserved_special_token_168|>",
|
| 1414 |
+
"lstrip": false,
|
| 1415 |
+
"normalized": false,
|
| 1416 |
+
"rstrip": false,
|
| 1417 |
+
"single_word": false,
|
| 1418 |
+
"special": true
|
| 1419 |
+
},
|
| 1420 |
+
"128177": {
|
| 1421 |
+
"content": "<|reserved_special_token_169|>",
|
| 1422 |
+
"lstrip": false,
|
| 1423 |
+
"normalized": false,
|
| 1424 |
+
"rstrip": false,
|
| 1425 |
+
"single_word": false,
|
| 1426 |
+
"special": true
|
| 1427 |
+
},
|
| 1428 |
+
"128178": {
|
| 1429 |
+
"content": "<|reserved_special_token_170|>",
|
| 1430 |
+
"lstrip": false,
|
| 1431 |
+
"normalized": false,
|
| 1432 |
+
"rstrip": false,
|
| 1433 |
+
"single_word": false,
|
| 1434 |
+
"special": true
|
| 1435 |
+
},
|
| 1436 |
+
"128179": {
|
| 1437 |
+
"content": "<|reserved_special_token_171|>",
|
| 1438 |
+
"lstrip": false,
|
| 1439 |
+
"normalized": false,
|
| 1440 |
+
"rstrip": false,
|
| 1441 |
+
"single_word": false,
|
| 1442 |
+
"special": true
|
| 1443 |
+
},
|
| 1444 |
+
"128180": {
|
| 1445 |
+
"content": "<|reserved_special_token_172|>",
|
| 1446 |
+
"lstrip": false,
|
| 1447 |
+
"normalized": false,
|
| 1448 |
+
"rstrip": false,
|
| 1449 |
+
"single_word": false,
|
| 1450 |
+
"special": true
|
| 1451 |
+
},
|
| 1452 |
+
"128181": {
|
| 1453 |
+
"content": "<|reserved_special_token_173|>",
|
| 1454 |
+
"lstrip": false,
|
| 1455 |
+
"normalized": false,
|
| 1456 |
+
"rstrip": false,
|
| 1457 |
+
"single_word": false,
|
| 1458 |
+
"special": true
|
| 1459 |
+
},
|
| 1460 |
+
"128182": {
|
| 1461 |
+
"content": "<|reserved_special_token_174|>",
|
| 1462 |
+
"lstrip": false,
|
| 1463 |
+
"normalized": false,
|
| 1464 |
+
"rstrip": false,
|
| 1465 |
+
"single_word": false,
|
| 1466 |
+
"special": true
|
| 1467 |
+
},
|
| 1468 |
+
"128183": {
|
| 1469 |
+
"content": "<|reserved_special_token_175|>",
|
| 1470 |
+
"lstrip": false,
|
| 1471 |
+
"normalized": false,
|
| 1472 |
+
"rstrip": false,
|
| 1473 |
+
"single_word": false,
|
| 1474 |
+
"special": true
|
| 1475 |
+
},
|
| 1476 |
+
"128184": {
|
| 1477 |
+
"content": "<|reserved_special_token_176|>",
|
| 1478 |
+
"lstrip": false,
|
| 1479 |
+
"normalized": false,
|
| 1480 |
+
"rstrip": false,
|
| 1481 |
+
"single_word": false,
|
| 1482 |
+
"special": true
|
| 1483 |
+
},
|
| 1484 |
+
"128185": {
|
| 1485 |
+
"content": "<|reserved_special_token_177|>",
|
| 1486 |
+
"lstrip": false,
|
| 1487 |
+
"normalized": false,
|
| 1488 |
+
"rstrip": false,
|
| 1489 |
+
"single_word": false,
|
| 1490 |
+
"special": true
|
| 1491 |
+
},
|
| 1492 |
+
"128186": {
|
| 1493 |
+
"content": "<|reserved_special_token_178|>",
|
| 1494 |
+
"lstrip": false,
|
| 1495 |
+
"normalized": false,
|
| 1496 |
+
"rstrip": false,
|
| 1497 |
+
"single_word": false,
|
| 1498 |
+
"special": true
|
| 1499 |
+
},
|
| 1500 |
+
"128187": {
|
| 1501 |
+
"content": "<|reserved_special_token_179|>",
|
| 1502 |
+
"lstrip": false,
|
| 1503 |
+
"normalized": false,
|
| 1504 |
+
"rstrip": false,
|
| 1505 |
+
"single_word": false,
|
| 1506 |
+
"special": true
|
| 1507 |
+
},
|
| 1508 |
+
"128188": {
|
| 1509 |
+
"content": "<|reserved_special_token_180|>",
|
| 1510 |
+
"lstrip": false,
|
| 1511 |
+
"normalized": false,
|
| 1512 |
+
"rstrip": false,
|
| 1513 |
+
"single_word": false,
|
| 1514 |
+
"special": true
|
| 1515 |
+
},
|
| 1516 |
+
"128189": {
|
| 1517 |
+
"content": "<|reserved_special_token_181|>",
|
| 1518 |
+
"lstrip": false,
|
| 1519 |
+
"normalized": false,
|
| 1520 |
+
"rstrip": false,
|
| 1521 |
+
"single_word": false,
|
| 1522 |
+
"special": true
|
| 1523 |
+
},
|
| 1524 |
+
"128190": {
|
| 1525 |
+
"content": "<|reserved_special_token_182|>",
|
| 1526 |
+
"lstrip": false,
|
| 1527 |
+
"normalized": false,
|
| 1528 |
+
"rstrip": false,
|
| 1529 |
+
"single_word": false,
|
| 1530 |
+
"special": true
|
| 1531 |
+
},
|
| 1532 |
+
"128191": {
|
| 1533 |
+
"content": "<|reserved_special_token_183|>",
|
| 1534 |
+
"lstrip": false,
|
| 1535 |
+
"normalized": false,
|
| 1536 |
+
"rstrip": false,
|
| 1537 |
+
"single_word": false,
|
| 1538 |
+
"special": true
|
| 1539 |
+
},
|
| 1540 |
+
"128192": {
|
| 1541 |
+
"content": "<|reserved_special_token_184|>",
|
| 1542 |
+
"lstrip": false,
|
| 1543 |
+
"normalized": false,
|
| 1544 |
+
"rstrip": false,
|
| 1545 |
+
"single_word": false,
|
| 1546 |
+
"special": true
|
| 1547 |
+
},
|
| 1548 |
+
"128193": {
|
| 1549 |
+
"content": "<|reserved_special_token_185|>",
|
| 1550 |
+
"lstrip": false,
|
| 1551 |
+
"normalized": false,
|
| 1552 |
+
"rstrip": false,
|
| 1553 |
+
"single_word": false,
|
| 1554 |
+
"special": true
|
| 1555 |
+
},
|
| 1556 |
+
"128194": {
|
| 1557 |
+
"content": "<|reserved_special_token_186|>",
|
| 1558 |
+
"lstrip": false,
|
| 1559 |
+
"normalized": false,
|
| 1560 |
+
"rstrip": false,
|
| 1561 |
+
"single_word": false,
|
| 1562 |
+
"special": true
|
| 1563 |
+
},
|
| 1564 |
+
"128195": {
|
| 1565 |
+
"content": "<|reserved_special_token_187|>",
|
| 1566 |
+
"lstrip": false,
|
| 1567 |
+
"normalized": false,
|
| 1568 |
+
"rstrip": false,
|
| 1569 |
+
"single_word": false,
|
| 1570 |
+
"special": true
|
| 1571 |
+
},
|
| 1572 |
+
"128196": {
|
| 1573 |
+
"content": "<|reserved_special_token_188|>",
|
| 1574 |
+
"lstrip": false,
|
| 1575 |
+
"normalized": false,
|
| 1576 |
+
"rstrip": false,
|
| 1577 |
+
"single_word": false,
|
| 1578 |
+
"special": true
|
| 1579 |
+
},
|
| 1580 |
+
"128197": {
|
| 1581 |
+
"content": "<|reserved_special_token_189|>",
|
| 1582 |
+
"lstrip": false,
|
| 1583 |
+
"normalized": false,
|
| 1584 |
+
"rstrip": false,
|
| 1585 |
+
"single_word": false,
|
| 1586 |
+
"special": true
|
| 1587 |
+
},
|
| 1588 |
+
"128198": {
|
| 1589 |
+
"content": "<|reserved_special_token_190|>",
|
| 1590 |
+
"lstrip": false,
|
| 1591 |
+
"normalized": false,
|
| 1592 |
+
"rstrip": false,
|
| 1593 |
+
"single_word": false,
|
| 1594 |
+
"special": true
|
| 1595 |
+
},
|
| 1596 |
+
"128199": {
|
| 1597 |
+
"content": "<|reserved_special_token_191|>",
|
| 1598 |
+
"lstrip": false,
|
| 1599 |
+
"normalized": false,
|
| 1600 |
+
"rstrip": false,
|
| 1601 |
+
"single_word": false,
|
| 1602 |
+
"special": true
|
| 1603 |
+
},
|
| 1604 |
+
"128200": {
|
| 1605 |
+
"content": "<|reserved_special_token_192|>",
|
| 1606 |
+
"lstrip": false,
|
| 1607 |
+
"normalized": false,
|
| 1608 |
+
"rstrip": false,
|
| 1609 |
+
"single_word": false,
|
| 1610 |
+
"special": true
|
| 1611 |
+
},
|
| 1612 |
+
"128201": {
|
| 1613 |
+
"content": "<|reserved_special_token_193|>",
|
| 1614 |
+
"lstrip": false,
|
| 1615 |
+
"normalized": false,
|
| 1616 |
+
"rstrip": false,
|
| 1617 |
+
"single_word": false,
|
| 1618 |
+
"special": true
|
| 1619 |
+
},
|
| 1620 |
+
"128202": {
|
| 1621 |
+
"content": "<|reserved_special_token_194|>",
|
| 1622 |
+
"lstrip": false,
|
| 1623 |
+
"normalized": false,
|
| 1624 |
+
"rstrip": false,
|
| 1625 |
+
"single_word": false,
|
| 1626 |
+
"special": true
|
| 1627 |
+
},
|
| 1628 |
+
"128203": {
|
| 1629 |
+
"content": "<|reserved_special_token_195|>",
|
| 1630 |
+
"lstrip": false,
|
| 1631 |
+
"normalized": false,
|
| 1632 |
+
"rstrip": false,
|
| 1633 |
+
"single_word": false,
|
| 1634 |
+
"special": true
|
| 1635 |
+
},
|
| 1636 |
+
"128204": {
|
| 1637 |
+
"content": "<|reserved_special_token_196|>",
|
| 1638 |
+
"lstrip": false,
|
| 1639 |
+
"normalized": false,
|
| 1640 |
+
"rstrip": false,
|
| 1641 |
+
"single_word": false,
|
| 1642 |
+
"special": true
|
| 1643 |
+
},
|
| 1644 |
+
"128205": {
|
| 1645 |
+
"content": "<|reserved_special_token_197|>",
|
| 1646 |
+
"lstrip": false,
|
| 1647 |
+
"normalized": false,
|
| 1648 |
+
"rstrip": false,
|
| 1649 |
+
"single_word": false,
|
| 1650 |
+
"special": true
|
| 1651 |
+
},
|
| 1652 |
+
"128206": {
|
| 1653 |
+
"content": "<|reserved_special_token_198|>",
|
| 1654 |
+
"lstrip": false,
|
| 1655 |
+
"normalized": false,
|
| 1656 |
+
"rstrip": false,
|
| 1657 |
+
"single_word": false,
|
| 1658 |
+
"special": true
|
| 1659 |
+
},
|
| 1660 |
+
"128207": {
|
| 1661 |
+
"content": "<|reserved_special_token_199|>",
|
| 1662 |
+
"lstrip": false,
|
| 1663 |
+
"normalized": false,
|
| 1664 |
+
"rstrip": false,
|
| 1665 |
+
"single_word": false,
|
| 1666 |
+
"special": true
|
| 1667 |
+
},
|
| 1668 |
+
"128208": {
|
| 1669 |
+
"content": "<|reserved_special_token_200|>",
|
| 1670 |
+
"lstrip": false,
|
| 1671 |
+
"normalized": false,
|
| 1672 |
+
"rstrip": false,
|
| 1673 |
+
"single_word": false,
|
| 1674 |
+
"special": true
|
| 1675 |
+
},
|
| 1676 |
+
"128209": {
|
| 1677 |
+
"content": "<|reserved_special_token_201|>",
|
| 1678 |
+
"lstrip": false,
|
| 1679 |
+
"normalized": false,
|
| 1680 |
+
"rstrip": false,
|
| 1681 |
+
"single_word": false,
|
| 1682 |
+
"special": true
|
| 1683 |
+
},
|
| 1684 |
+
"128210": {
|
| 1685 |
+
"content": "<|reserved_special_token_202|>",
|
| 1686 |
+
"lstrip": false,
|
| 1687 |
+
"normalized": false,
|
| 1688 |
+
"rstrip": false,
|
| 1689 |
+
"single_word": false,
|
| 1690 |
+
"special": true
|
| 1691 |
+
},
|
| 1692 |
+
"128211": {
|
| 1693 |
+
"content": "<|reserved_special_token_203|>",
|
| 1694 |
+
"lstrip": false,
|
| 1695 |
+
"normalized": false,
|
| 1696 |
+
"rstrip": false,
|
| 1697 |
+
"single_word": false,
|
| 1698 |
+
"special": true
|
| 1699 |
+
},
|
| 1700 |
+
"128212": {
|
| 1701 |
+
"content": "<|reserved_special_token_204|>",
|
| 1702 |
+
"lstrip": false,
|
| 1703 |
+
"normalized": false,
|
| 1704 |
+
"rstrip": false,
|
| 1705 |
+
"single_word": false,
|
| 1706 |
+
"special": true
|
| 1707 |
+
},
|
| 1708 |
+
"128213": {
|
| 1709 |
+
"content": "<|reserved_special_token_205|>",
|
| 1710 |
+
"lstrip": false,
|
| 1711 |
+
"normalized": false,
|
| 1712 |
+
"rstrip": false,
|
| 1713 |
+
"single_word": false,
|
| 1714 |
+
"special": true
|
| 1715 |
+
},
|
| 1716 |
+
"128214": {
|
| 1717 |
+
"content": "<|reserved_special_token_206|>",
|
| 1718 |
+
"lstrip": false,
|
| 1719 |
+
"normalized": false,
|
| 1720 |
+
"rstrip": false,
|
| 1721 |
+
"single_word": false,
|
| 1722 |
+
"special": true
|
| 1723 |
+
},
|
| 1724 |
+
"128215": {
|
| 1725 |
+
"content": "<|reserved_special_token_207|>",
|
| 1726 |
+
"lstrip": false,
|
| 1727 |
+
"normalized": false,
|
| 1728 |
+
"rstrip": false,
|
| 1729 |
+
"single_word": false,
|
| 1730 |
+
"special": true
|
| 1731 |
+
},
|
| 1732 |
+
"128216": {
|
| 1733 |
+
"content": "<|reserved_special_token_208|>",
|
| 1734 |
+
"lstrip": false,
|
| 1735 |
+
"normalized": false,
|
| 1736 |
+
"rstrip": false,
|
| 1737 |
+
"single_word": false,
|
| 1738 |
+
"special": true
|
| 1739 |
+
},
|
| 1740 |
+
"128217": {
|
| 1741 |
+
"content": "<|reserved_special_token_209|>",
|
| 1742 |
+
"lstrip": false,
|
| 1743 |
+
"normalized": false,
|
| 1744 |
+
"rstrip": false,
|
| 1745 |
+
"single_word": false,
|
| 1746 |
+
"special": true
|
| 1747 |
+
},
|
| 1748 |
+
"128218": {
|
| 1749 |
+
"content": "<|reserved_special_token_210|>",
|
| 1750 |
+
"lstrip": false,
|
| 1751 |
+
"normalized": false,
|
| 1752 |
+
"rstrip": false,
|
| 1753 |
+
"single_word": false,
|
| 1754 |
+
"special": true
|
| 1755 |
+
},
|
| 1756 |
+
"128219": {
|
| 1757 |
+
"content": "<|reserved_special_token_211|>",
|
| 1758 |
+
"lstrip": false,
|
| 1759 |
+
"normalized": false,
|
| 1760 |
+
"rstrip": false,
|
| 1761 |
+
"single_word": false,
|
| 1762 |
+
"special": true
|
| 1763 |
+
},
|
| 1764 |
+
"128220": {
|
| 1765 |
+
"content": "<|reserved_special_token_212|>",
|
| 1766 |
+
"lstrip": false,
|
| 1767 |
+
"normalized": false,
|
| 1768 |
+
"rstrip": false,
|
| 1769 |
+
"single_word": false,
|
| 1770 |
+
"special": true
|
| 1771 |
+
},
|
| 1772 |
+
"128221": {
|
| 1773 |
+
"content": "<|reserved_special_token_213|>",
|
| 1774 |
+
"lstrip": false,
|
| 1775 |
+
"normalized": false,
|
| 1776 |
+
"rstrip": false,
|
| 1777 |
+
"single_word": false,
|
| 1778 |
+
"special": true
|
| 1779 |
+
},
|
| 1780 |
+
"128222": {
|
| 1781 |
+
"content": "<|reserved_special_token_214|>",
|
| 1782 |
+
"lstrip": false,
|
| 1783 |
+
"normalized": false,
|
| 1784 |
+
"rstrip": false,
|
| 1785 |
+
"single_word": false,
|
| 1786 |
+
"special": true
|
| 1787 |
+
},
|
| 1788 |
+
"128223": {
|
| 1789 |
+
"content": "<|reserved_special_token_215|>",
|
| 1790 |
+
"lstrip": false,
|
| 1791 |
+
"normalized": false,
|
| 1792 |
+
"rstrip": false,
|
| 1793 |
+
"single_word": false,
|
| 1794 |
+
"special": true
|
| 1795 |
+
},
|
| 1796 |
+
"128224": {
|
| 1797 |
+
"content": "<|reserved_special_token_216|>",
|
| 1798 |
+
"lstrip": false,
|
| 1799 |
+
"normalized": false,
|
| 1800 |
+
"rstrip": false,
|
| 1801 |
+
"single_word": false,
|
| 1802 |
+
"special": true
|
| 1803 |
+
},
|
| 1804 |
+
"128225": {
|
| 1805 |
+
"content": "<|reserved_special_token_217|>",
|
| 1806 |
+
"lstrip": false,
|
| 1807 |
+
"normalized": false,
|
| 1808 |
+
"rstrip": false,
|
| 1809 |
+
"single_word": false,
|
| 1810 |
+
"special": true
|
| 1811 |
+
},
|
| 1812 |
+
"128226": {
|
| 1813 |
+
"content": "<|reserved_special_token_218|>",
|
| 1814 |
+
"lstrip": false,
|
| 1815 |
+
"normalized": false,
|
| 1816 |
+
"rstrip": false,
|
| 1817 |
+
"single_word": false,
|
| 1818 |
+
"special": true
|
| 1819 |
+
},
|
| 1820 |
+
"128227": {
|
| 1821 |
+
"content": "<|reserved_special_token_219|>",
|
| 1822 |
+
"lstrip": false,
|
| 1823 |
+
"normalized": false,
|
| 1824 |
+
"rstrip": false,
|
| 1825 |
+
"single_word": false,
|
| 1826 |
+
"special": true
|
| 1827 |
+
},
|
| 1828 |
+
"128228": {
|
| 1829 |
+
"content": "<|reserved_special_token_220|>",
|
| 1830 |
+
"lstrip": false,
|
| 1831 |
+
"normalized": false,
|
| 1832 |
+
"rstrip": false,
|
| 1833 |
+
"single_word": false,
|
| 1834 |
+
"special": true
|
| 1835 |
+
},
|
| 1836 |
+
"128229": {
|
| 1837 |
+
"content": "<|reserved_special_token_221|>",
|
| 1838 |
+
"lstrip": false,
|
| 1839 |
+
"normalized": false,
|
| 1840 |
+
"rstrip": false,
|
| 1841 |
+
"single_word": false,
|
| 1842 |
+
"special": true
|
| 1843 |
+
},
|
| 1844 |
+
"128230": {
|
| 1845 |
+
"content": "<|reserved_special_token_222|>",
|
| 1846 |
+
"lstrip": false,
|
| 1847 |
+
"normalized": false,
|
| 1848 |
+
"rstrip": false,
|
| 1849 |
+
"single_word": false,
|
| 1850 |
+
"special": true
|
| 1851 |
+
},
|
| 1852 |
+
"128231": {
|
| 1853 |
+
"content": "<|reserved_special_token_223|>",
|
| 1854 |
+
"lstrip": false,
|
| 1855 |
+
"normalized": false,
|
| 1856 |
+
"rstrip": false,
|
| 1857 |
+
"single_word": false,
|
| 1858 |
+
"special": true
|
| 1859 |
+
},
|
| 1860 |
+
"128232": {
|
| 1861 |
+
"content": "<|reserved_special_token_224|>",
|
| 1862 |
+
"lstrip": false,
|
| 1863 |
+
"normalized": false,
|
| 1864 |
+
"rstrip": false,
|
| 1865 |
+
"single_word": false,
|
| 1866 |
+
"special": true
|
| 1867 |
+
},
|
| 1868 |
+
"128233": {
|
| 1869 |
+
"content": "<|reserved_special_token_225|>",
|
| 1870 |
+
"lstrip": false,
|
| 1871 |
+
"normalized": false,
|
| 1872 |
+
"rstrip": false,
|
| 1873 |
+
"single_word": false,
|
| 1874 |
+
"special": true
|
| 1875 |
+
},
|
| 1876 |
+
"128234": {
|
| 1877 |
+
"content": "<|reserved_special_token_226|>",
|
| 1878 |
+
"lstrip": false,
|
| 1879 |
+
"normalized": false,
|
| 1880 |
+
"rstrip": false,
|
| 1881 |
+
"single_word": false,
|
| 1882 |
+
"special": true
|
| 1883 |
+
},
|
| 1884 |
+
"128235": {
|
| 1885 |
+
"content": "<|reserved_special_token_227|>",
|
| 1886 |
+
"lstrip": false,
|
| 1887 |
+
"normalized": false,
|
| 1888 |
+
"rstrip": false,
|
| 1889 |
+
"single_word": false,
|
| 1890 |
+
"special": true
|
| 1891 |
+
},
|
| 1892 |
+
"128236": {
|
| 1893 |
+
"content": "<|reserved_special_token_228|>",
|
| 1894 |
+
"lstrip": false,
|
| 1895 |
+
"normalized": false,
|
| 1896 |
+
"rstrip": false,
|
| 1897 |
+
"single_word": false,
|
| 1898 |
+
"special": true
|
| 1899 |
+
},
|
| 1900 |
+
"128237": {
|
| 1901 |
+
"content": "<|reserved_special_token_229|>",
|
| 1902 |
+
"lstrip": false,
|
| 1903 |
+
"normalized": false,
|
| 1904 |
+
"rstrip": false,
|
| 1905 |
+
"single_word": false,
|
| 1906 |
+
"special": true
|
| 1907 |
+
},
|
| 1908 |
+
"128238": {
|
| 1909 |
+
"content": "<|reserved_special_token_230|>",
|
| 1910 |
+
"lstrip": false,
|
| 1911 |
+
"normalized": false,
|
| 1912 |
+
"rstrip": false,
|
| 1913 |
+
"single_word": false,
|
| 1914 |
+
"special": true
|
| 1915 |
+
},
|
| 1916 |
+
"128239": {
|
| 1917 |
+
"content": "<|reserved_special_token_231|>",
|
| 1918 |
+
"lstrip": false,
|
| 1919 |
+
"normalized": false,
|
| 1920 |
+
"rstrip": false,
|
| 1921 |
+
"single_word": false,
|
| 1922 |
+
"special": true
|
| 1923 |
+
},
|
| 1924 |
+
"128240": {
|
| 1925 |
+
"content": "<|reserved_special_token_232|>",
|
| 1926 |
+
"lstrip": false,
|
| 1927 |
+
"normalized": false,
|
| 1928 |
+
"rstrip": false,
|
| 1929 |
+
"single_word": false,
|
| 1930 |
+
"special": true
|
| 1931 |
+
},
|
| 1932 |
+
"128241": {
|
| 1933 |
+
"content": "<|reserved_special_token_233|>",
|
| 1934 |
+
"lstrip": false,
|
| 1935 |
+
"normalized": false,
|
| 1936 |
+
"rstrip": false,
|
| 1937 |
+
"single_word": false,
|
| 1938 |
+
"special": true
|
| 1939 |
+
},
|
| 1940 |
+
"128242": {
|
| 1941 |
+
"content": "<|reserved_special_token_234|>",
|
| 1942 |
+
"lstrip": false,
|
| 1943 |
+
"normalized": false,
|
| 1944 |
+
"rstrip": false,
|
| 1945 |
+
"single_word": false,
|
| 1946 |
+
"special": true
|
| 1947 |
+
},
|
| 1948 |
+
"128243": {
|
| 1949 |
+
"content": "<|reserved_special_token_235|>",
|
| 1950 |
+
"lstrip": false,
|
| 1951 |
+
"normalized": false,
|
| 1952 |
+
"rstrip": false,
|
| 1953 |
+
"single_word": false,
|
| 1954 |
+
"special": true
|
| 1955 |
+
},
|
| 1956 |
+
"128244": {
|
| 1957 |
+
"content": "<|reserved_special_token_236|>",
|
| 1958 |
+
"lstrip": false,
|
| 1959 |
+
"normalized": false,
|
| 1960 |
+
"rstrip": false,
|
| 1961 |
+
"single_word": false,
|
| 1962 |
+
"special": true
|
| 1963 |
+
},
|
| 1964 |
+
"128245": {
|
| 1965 |
+
"content": "<|reserved_special_token_237|>",
|
| 1966 |
+
"lstrip": false,
|
| 1967 |
+
"normalized": false,
|
| 1968 |
+
"rstrip": false,
|
| 1969 |
+
"single_word": false,
|
| 1970 |
+
"special": true
|
| 1971 |
+
},
|
| 1972 |
+
"128246": {
|
| 1973 |
+
"content": "<|reserved_special_token_238|>",
|
| 1974 |
+
"lstrip": false,
|
| 1975 |
+
"normalized": false,
|
| 1976 |
+
"rstrip": false,
|
| 1977 |
+
"single_word": false,
|
| 1978 |
+
"special": true
|
| 1979 |
+
},
|
| 1980 |
+
"128247": {
|
| 1981 |
+
"content": "<|reserved_special_token_239|>",
|
| 1982 |
+
"lstrip": false,
|
| 1983 |
+
"normalized": false,
|
| 1984 |
+
"rstrip": false,
|
| 1985 |
+
"single_word": false,
|
| 1986 |
+
"special": true
|
| 1987 |
+
},
|
| 1988 |
+
"128248": {
|
| 1989 |
+
"content": "<|reserved_special_token_240|>",
|
| 1990 |
+
"lstrip": false,
|
| 1991 |
+
"normalized": false,
|
| 1992 |
+
"rstrip": false,
|
| 1993 |
+
"single_word": false,
|
| 1994 |
+
"special": true
|
| 1995 |
+
},
|
| 1996 |
+
"128249": {
|
| 1997 |
+
"content": "<|reserved_special_token_241|>",
|
| 1998 |
+
"lstrip": false,
|
| 1999 |
+
"normalized": false,
|
| 2000 |
+
"rstrip": false,
|
| 2001 |
+
"single_word": false,
|
| 2002 |
+
"special": true
|
| 2003 |
+
},
|
| 2004 |
+
"128250": {
|
| 2005 |
+
"content": "<|reserved_special_token_242|>",
|
| 2006 |
+
"lstrip": false,
|
| 2007 |
+
"normalized": false,
|
| 2008 |
+
"rstrip": false,
|
| 2009 |
+
"single_word": false,
|
| 2010 |
+
"special": true
|
| 2011 |
+
},
|
| 2012 |
+
"128251": {
|
| 2013 |
+
"content": "<|reserved_special_token_243|>",
|
| 2014 |
+
"lstrip": false,
|
| 2015 |
+
"normalized": false,
|
| 2016 |
+
"rstrip": false,
|
| 2017 |
+
"single_word": false,
|
| 2018 |
+
"special": true
|
| 2019 |
+
},
|
| 2020 |
+
"128252": {
|
| 2021 |
+
"content": "<|reserved_special_token_244|>",
|
| 2022 |
+
"lstrip": false,
|
| 2023 |
+
"normalized": false,
|
| 2024 |
+
"rstrip": false,
|
| 2025 |
+
"single_word": false,
|
| 2026 |
+
"special": true
|
| 2027 |
+
},
|
| 2028 |
+
"128253": {
|
| 2029 |
+
"content": "<|reserved_special_token_245|>",
|
| 2030 |
+
"lstrip": false,
|
| 2031 |
+
"normalized": false,
|
| 2032 |
+
"rstrip": false,
|
| 2033 |
+
"single_word": false,
|
| 2034 |
+
"special": true
|
| 2035 |
+
},
|
| 2036 |
+
"128254": {
|
| 2037 |
+
"content": "<|reserved_special_token_246|>",
|
| 2038 |
+
"lstrip": false,
|
| 2039 |
+
"normalized": false,
|
| 2040 |
+
"rstrip": false,
|
| 2041 |
+
"single_word": false,
|
| 2042 |
+
"special": true
|
| 2043 |
+
},
|
| 2044 |
+
"128255": {
|
| 2045 |
+
"content": "<|reserved_special_token_247|>",
|
| 2046 |
+
"lstrip": false,
|
| 2047 |
+
"normalized": false,
|
| 2048 |
+
"rstrip": false,
|
| 2049 |
+
"single_word": false,
|
| 2050 |
+
"special": true
|
| 2051 |
+
}
|
| 2052 |
+
},
|
| 2053 |
+
"bos_token": "<|begin_of_text|>",
|
| 2054 |
+
"clean_up_tokenization_spaces": true,
|
| 2055 |
+
"eos_token": "<|end_of_text|>",
|
| 2056 |
+
"extra_special_tokens": {},
|
| 2057 |
+
"model_input_names": [
|
| 2058 |
+
"input_ids",
|
| 2059 |
+
"attention_mask"
|
| 2060 |
+
],
|
| 2061 |
+
"model_max_length": 131072,
|
| 2062 |
+
"pad_token": "<|finetune_right_pad_id|>",
|
| 2063 |
+
"padding_side": "right",
|
| 2064 |
+
"tokenizer_class": "PreTrainedTokenizer",
|
| 2065 |
+
"unk_token": null
|
| 2066 |
+
}
|
outputs/checkpoint-60/trainer_state.json
ADDED
|
@@ -0,0 +1,454 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"best_global_step": null,
|
| 3 |
+
"best_metric": null,
|
| 4 |
+
"best_model_checkpoint": null,
|
| 5 |
+
"epoch": 0.003688845852353945,
|
| 6 |
+
"eval_steps": 10,
|
| 7 |
+
"global_step": 60,
|
| 8 |
+
"is_hyper_param_search": false,
|
| 9 |
+
"is_local_process_zero": true,
|
| 10 |
+
"is_world_process_zero": true,
|
| 11 |
+
"log_history": [
|
| 12 |
+
{
|
| 13 |
+
"epoch": 6.148076420589908e-05,
|
| 14 |
+
"grad_norm": 0.003479025326669216,
|
| 15 |
+
"learning_rate": 0.0,
|
| 16 |
+
"loss": 0.0,
|
| 17 |
+
"step": 1
|
| 18 |
+
},
|
| 19 |
+
{
|
| 20 |
+
"epoch": 0.00012296152841179817,
|
| 21 |
+
"grad_norm": 0.005437533836811781,
|
| 22 |
+
"learning_rate": 4e-05,
|
| 23 |
+
"loss": 0.0001,
|
| 24 |
+
"step": 2
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"epoch": 0.00018444229261769724,
|
| 28 |
+
"grad_norm": 0.00029893239843659103,
|
| 29 |
+
"learning_rate": 8e-05,
|
| 30 |
+
"loss": 0.0,
|
| 31 |
+
"step": 3
|
| 32 |
+
},
|
| 33 |
+
{
|
| 34 |
+
"epoch": 0.00024592305682359633,
|
| 35 |
+
"grad_norm": 0.00016204842540901154,
|
| 36 |
+
"learning_rate": 0.00012,
|
| 37 |
+
"loss": 0.0,
|
| 38 |
+
"step": 4
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"epoch": 0.0003074038210294954,
|
| 42 |
+
"grad_norm": 0.00012208402040414512,
|
| 43 |
+
"learning_rate": 0.00016,
|
| 44 |
+
"loss": 0.0,
|
| 45 |
+
"step": 5
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"epoch": 0.0003688845852353945,
|
| 49 |
+
"grad_norm": 0.0022303895093500614,
|
| 50 |
+
"learning_rate": 0.0002,
|
| 51 |
+
"loss": 0.0,
|
| 52 |
+
"step": 6
|
| 53 |
+
},
|
| 54 |
+
{
|
| 55 |
+
"epoch": 0.00043036534944129355,
|
| 56 |
+
"grad_norm": 0.0020995561499148607,
|
| 57 |
+
"learning_rate": 0.00019636363636363636,
|
| 58 |
+
"loss": 0.0,
|
| 59 |
+
"step": 7
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"epoch": 0.0004918461136471927,
|
| 63 |
+
"grad_norm": 0.008035325445234776,
|
| 64 |
+
"learning_rate": 0.00019272727272727274,
|
| 65 |
+
"loss": 0.0001,
|
| 66 |
+
"step": 8
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
"epoch": 0.0005533268778530917,
|
| 70 |
+
"grad_norm": 0.05308253690600395,
|
| 71 |
+
"learning_rate": 0.0001890909090909091,
|
| 72 |
+
"loss": 0.0026,
|
| 73 |
+
"step": 9
|
| 74 |
+
},
|
| 75 |
+
{
|
| 76 |
+
"epoch": 0.0006148076420589908,
|
| 77 |
+
"grad_norm": 0.005033898167312145,
|
| 78 |
+
"learning_rate": 0.00018545454545454545,
|
| 79 |
+
"loss": 0.0002,
|
| 80 |
+
"step": 10
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"epoch": 0.0006762884062648898,
|
| 84 |
+
"grad_norm": 0.000410953420214355,
|
| 85 |
+
"learning_rate": 0.00018181818181818183,
|
| 86 |
+
"loss": 0.0,
|
| 87 |
+
"step": 11
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"epoch": 0.000737769170470789,
|
| 91 |
+
"grad_norm": 0.044024836272001266,
|
| 92 |
+
"learning_rate": 0.0001781818181818182,
|
| 93 |
+
"loss": 0.0004,
|
| 94 |
+
"step": 12
|
| 95 |
+
},
|
| 96 |
+
{
|
| 97 |
+
"epoch": 0.0007992499346766881,
|
| 98 |
+
"grad_norm": 0.01820911094546318,
|
| 99 |
+
"learning_rate": 0.00017454545454545454,
|
| 100 |
+
"loss": 0.0001,
|
| 101 |
+
"step": 13
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"epoch": 0.0008607306988825871,
|
| 105 |
+
"grad_norm": 0.012782749719917774,
|
| 106 |
+
"learning_rate": 0.0001709090909090909,
|
| 107 |
+
"loss": 0.0002,
|
| 108 |
+
"step": 14
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"epoch": 0.0009222114630884862,
|
| 112 |
+
"grad_norm": 0.0022124142851680517,
|
| 113 |
+
"learning_rate": 0.00016727272727272728,
|
| 114 |
+
"loss": 0.0,
|
| 115 |
+
"step": 15
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"epoch": 0.0009836922272943853,
|
| 119 |
+
"grad_norm": 0.007108100224286318,
|
| 120 |
+
"learning_rate": 0.00016363636363636366,
|
| 121 |
+
"loss": 0.0001,
|
| 122 |
+
"step": 16
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"epoch": 0.0010451729915002842,
|
| 126 |
+
"grad_norm": 0.000606991583481431,
|
| 127 |
+
"learning_rate": 0.00016,
|
| 128 |
+
"loss": 0.0,
|
| 129 |
+
"step": 17
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"epoch": 0.0011066537557061834,
|
| 133 |
+
"grad_norm": 0.005945051088929176,
|
| 134 |
+
"learning_rate": 0.00015636363636363637,
|
| 135 |
+
"loss": 0.0002,
|
| 136 |
+
"step": 18
|
| 137 |
+
},
|
| 138 |
+
{
|
| 139 |
+
"epoch": 0.0011681345199120825,
|
| 140 |
+
"grad_norm": 0.05237742140889168,
|
| 141 |
+
"learning_rate": 0.00015272727272727275,
|
| 142 |
+
"loss": 0.0007,
|
| 143 |
+
"step": 19
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"epoch": 0.0012296152841179816,
|
| 147 |
+
"grad_norm": 0.01273553166538477,
|
| 148 |
+
"learning_rate": 0.0001490909090909091,
|
| 149 |
+
"loss": 0.0002,
|
| 150 |
+
"step": 20
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"epoch": 0.0012910960483238807,
|
| 154 |
+
"grad_norm": 0.0036445350851863623,
|
| 155 |
+
"learning_rate": 0.00014545454545454546,
|
| 156 |
+
"loss": 0.0001,
|
| 157 |
+
"step": 21
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"epoch": 0.0013525768125297797,
|
| 161 |
+
"grad_norm": 0.08508655428886414,
|
| 162 |
+
"learning_rate": 0.00014181818181818184,
|
| 163 |
+
"loss": 0.0083,
|
| 164 |
+
"step": 22
|
| 165 |
+
},
|
| 166 |
+
{
|
| 167 |
+
"epoch": 0.0014140575767356788,
|
| 168 |
+
"grad_norm": 0.043661970645189285,
|
| 169 |
+
"learning_rate": 0.0001381818181818182,
|
| 170 |
+
"loss": 0.004,
|
| 171 |
+
"step": 23
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"epoch": 0.001475538340941578,
|
| 175 |
+
"grad_norm": 0.000659774465020746,
|
| 176 |
+
"learning_rate": 0.00013454545454545455,
|
| 177 |
+
"loss": 0.0,
|
| 178 |
+
"step": 24
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"epoch": 0.001537019105147477,
|
| 182 |
+
"grad_norm": 0.0005294107249937952,
|
| 183 |
+
"learning_rate": 0.00013090909090909093,
|
| 184 |
+
"loss": 0.0,
|
| 185 |
+
"step": 25
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"epoch": 0.0015984998693533761,
|
| 189 |
+
"grad_norm": 0.00016782127204351127,
|
| 190 |
+
"learning_rate": 0.00012727272727272728,
|
| 191 |
+
"loss": 0.0,
|
| 192 |
+
"step": 26
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"epoch": 0.001659980633559275,
|
| 196 |
+
"grad_norm": 0.041384413838386536,
|
| 197 |
+
"learning_rate": 0.00012363636363636364,
|
| 198 |
+
"loss": 0.0008,
|
| 199 |
+
"step": 27
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"epoch": 0.0017214613977651742,
|
| 203 |
+
"grad_norm": 0.009205852635204792,
|
| 204 |
+
"learning_rate": 0.00012,
|
| 205 |
+
"loss": 0.0001,
|
| 206 |
+
"step": 28
|
| 207 |
+
},
|
| 208 |
+
{
|
| 209 |
+
"epoch": 0.0017829421619710733,
|
| 210 |
+
"grad_norm": 0.00016076133761089295,
|
| 211 |
+
"learning_rate": 0.00011636363636363636,
|
| 212 |
+
"loss": 0.0,
|
| 213 |
+
"step": 29
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"epoch": 0.0018444229261769724,
|
| 217 |
+
"grad_norm": 0.0002819328219629824,
|
| 218 |
+
"learning_rate": 0.00011272727272727272,
|
| 219 |
+
"loss": 0.0,
|
| 220 |
+
"step": 30
|
| 221 |
+
},
|
| 222 |
+
{
|
| 223 |
+
"epoch": 0.0019059036903828716,
|
| 224 |
+
"grad_norm": 0.00022894897847436368,
|
| 225 |
+
"learning_rate": 0.00010909090909090909,
|
| 226 |
+
"loss": 0.0,
|
| 227 |
+
"step": 31
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"epoch": 0.0019673844545887707,
|
| 231 |
+
"grad_norm": 0.014002679847180843,
|
| 232 |
+
"learning_rate": 0.00010545454545454545,
|
| 233 |
+
"loss": 0.0022,
|
| 234 |
+
"step": 32
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"epoch": 0.00202886521879467,
|
| 238 |
+
"grad_norm": 0.00016014272114261985,
|
| 239 |
+
"learning_rate": 0.00010181818181818181,
|
| 240 |
+
"loss": 0.0,
|
| 241 |
+
"step": 33
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"epoch": 0.0020903459830005685,
|
| 245 |
+
"grad_norm": 0.20411507785320282,
|
| 246 |
+
"learning_rate": 9.818181818181818e-05,
|
| 247 |
+
"loss": 0.0012,
|
| 248 |
+
"step": 34
|
| 249 |
+
},
|
| 250 |
+
{
|
| 251 |
+
"epoch": 0.0021518267472064676,
|
| 252 |
+
"grad_norm": 0.013666906394064426,
|
| 253 |
+
"learning_rate": 9.454545454545455e-05,
|
| 254 |
+
"loss": 0.0001,
|
| 255 |
+
"step": 35
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"epoch": 0.0022133075114123667,
|
| 259 |
+
"grad_norm": 0.005049742292612791,
|
| 260 |
+
"learning_rate": 9.090909090909092e-05,
|
| 261 |
+
"loss": 0.0001,
|
| 262 |
+
"step": 36
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"epoch": 0.002274788275618266,
|
| 266 |
+
"grad_norm": 0.0020620787981897593,
|
| 267 |
+
"learning_rate": 8.727272727272727e-05,
|
| 268 |
+
"loss": 0.0,
|
| 269 |
+
"step": 37
|
| 270 |
+
},
|
| 271 |
+
{
|
| 272 |
+
"epoch": 0.002336269039824165,
|
| 273 |
+
"grad_norm": 0.023437755182385445,
|
| 274 |
+
"learning_rate": 8.363636363636364e-05,
|
| 275 |
+
"loss": 0.0009,
|
| 276 |
+
"step": 38
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"epoch": 0.002397749804030064,
|
| 280 |
+
"grad_norm": 0.0012406132882460952,
|
| 281 |
+
"learning_rate": 8e-05,
|
| 282 |
+
"loss": 0.0001,
|
| 283 |
+
"step": 39
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"epoch": 0.0024592305682359632,
|
| 287 |
+
"grad_norm": 0.0004515462787821889,
|
| 288 |
+
"learning_rate": 7.636363636363637e-05,
|
| 289 |
+
"loss": 0.0,
|
| 290 |
+
"step": 40
|
| 291 |
+
},
|
| 292 |
+
{
|
| 293 |
+
"epoch": 0.0025207113324418624,
|
| 294 |
+
"grad_norm": 0.0053300075232982635,
|
| 295 |
+
"learning_rate": 7.272727272727273e-05,
|
| 296 |
+
"loss": 0.0,
|
| 297 |
+
"step": 41
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"epoch": 0.0025821920966477615,
|
| 301 |
+
"grad_norm": 0.012812405824661255,
|
| 302 |
+
"learning_rate": 6.90909090909091e-05,
|
| 303 |
+
"loss": 0.0005,
|
| 304 |
+
"step": 42
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"epoch": 0.0026436728608536606,
|
| 308 |
+
"grad_norm": 0.0002906050067394972,
|
| 309 |
+
"learning_rate": 6.545454545454546e-05,
|
| 310 |
+
"loss": 0.0,
|
| 311 |
+
"step": 43
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"epoch": 0.0027051536250595593,
|
| 315 |
+
"grad_norm": 0.0020773536525666714,
|
| 316 |
+
"learning_rate": 6.181818181818182e-05,
|
| 317 |
+
"loss": 0.0001,
|
| 318 |
+
"step": 44
|
| 319 |
+
},
|
| 320 |
+
{
|
| 321 |
+
"epoch": 0.0027666343892654584,
|
| 322 |
+
"grad_norm": 0.0004460048221517354,
|
| 323 |
+
"learning_rate": 5.818181818181818e-05,
|
| 324 |
+
"loss": 0.0,
|
| 325 |
+
"step": 45
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"epoch": 0.0028281151534713576,
|
| 329 |
+
"grad_norm": 0.004530001897364855,
|
| 330 |
+
"learning_rate": 5.4545454545454546e-05,
|
| 331 |
+
"loss": 0.0001,
|
| 332 |
+
"step": 46
|
| 333 |
+
},
|
| 334 |
+
{
|
| 335 |
+
"epoch": 0.0028895959176772567,
|
| 336 |
+
"grad_norm": 0.0008538271649740636,
|
| 337 |
+
"learning_rate": 5.090909090909091e-05,
|
| 338 |
+
"loss": 0.0,
|
| 339 |
+
"step": 47
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"epoch": 0.002951076681883156,
|
| 343 |
+
"grad_norm": 0.01173599623143673,
|
| 344 |
+
"learning_rate": 4.7272727272727275e-05,
|
| 345 |
+
"loss": 0.0006,
|
| 346 |
+
"step": 48
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"epoch": 0.003012557446089055,
|
| 350 |
+
"grad_norm": 0.10636617243289948,
|
| 351 |
+
"learning_rate": 4.3636363636363636e-05,
|
| 352 |
+
"loss": 0.009,
|
| 353 |
+
"step": 49
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"epoch": 0.003074038210294954,
|
| 357 |
+
"grad_norm": 0.019629845395684242,
|
| 358 |
+
"learning_rate": 4e-05,
|
| 359 |
+
"loss": 0.0004,
|
| 360 |
+
"step": 50
|
| 361 |
+
},
|
| 362 |
+
{
|
| 363 |
+
"epoch": 0.003135518974500853,
|
| 364 |
+
"grad_norm": 0.025700179859995842,
|
| 365 |
+
"learning_rate": 3.6363636363636364e-05,
|
| 366 |
+
"loss": 0.0001,
|
| 367 |
+
"step": 51
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"epoch": 0.0031969997387067523,
|
| 371 |
+
"grad_norm": 0.004359859973192215,
|
| 372 |
+
"learning_rate": 3.272727272727273e-05,
|
| 373 |
+
"loss": 0.0,
|
| 374 |
+
"step": 52
|
| 375 |
+
},
|
| 376 |
+
{
|
| 377 |
+
"epoch": 0.003258480502912651,
|
| 378 |
+
"grad_norm": 0.0004983420949429274,
|
| 379 |
+
"learning_rate": 2.909090909090909e-05,
|
| 380 |
+
"loss": 0.0,
|
| 381 |
+
"step": 53
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"epoch": 0.00331996126711855,
|
| 385 |
+
"grad_norm": 0.0008296012529172003,
|
| 386 |
+
"learning_rate": 2.5454545454545454e-05,
|
| 387 |
+
"loss": 0.0,
|
| 388 |
+
"step": 54
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"epoch": 0.0033814420313244492,
|
| 392 |
+
"grad_norm": 0.0012659059138968587,
|
| 393 |
+
"learning_rate": 2.1818181818181818e-05,
|
| 394 |
+
"loss": 0.0,
|
| 395 |
+
"step": 55
|
| 396 |
+
},
|
| 397 |
+
{
|
| 398 |
+
"epoch": 0.0034429227955303484,
|
| 399 |
+
"grad_norm": 0.0003076361317653209,
|
| 400 |
+
"learning_rate": 1.8181818181818182e-05,
|
| 401 |
+
"loss": 0.0,
|
| 402 |
+
"step": 56
|
| 403 |
+
},
|
| 404 |
+
{
|
| 405 |
+
"epoch": 0.0035044035597362475,
|
| 406 |
+
"grad_norm": 0.0007758835563436151,
|
| 407 |
+
"learning_rate": 1.4545454545454545e-05,
|
| 408 |
+
"loss": 0.0,
|
| 409 |
+
"step": 57
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"epoch": 0.0035658843239421466,
|
| 413 |
+
"grad_norm": 0.00053775793639943,
|
| 414 |
+
"learning_rate": 1.0909090909090909e-05,
|
| 415 |
+
"loss": 0.0,
|
| 416 |
+
"step": 58
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"epoch": 0.0036273650881480457,
|
| 420 |
+
"grad_norm": 0.0034126462414860725,
|
| 421 |
+
"learning_rate": 7.272727272727272e-06,
|
| 422 |
+
"loss": 0.0002,
|
| 423 |
+
"step": 59
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"epoch": 0.003688845852353945,
|
| 427 |
+
"grad_norm": 0.0009407071629539132,
|
| 428 |
+
"learning_rate": 3.636363636363636e-06,
|
| 429 |
+
"loss": 0.0,
|
| 430 |
+
"step": 60
|
| 431 |
+
}
|
| 432 |
+
],
|
| 433 |
+
"logging_steps": 1,
|
| 434 |
+
"max_steps": 60,
|
| 435 |
+
"num_input_tokens_seen": 0,
|
| 436 |
+
"num_train_epochs": 1,
|
| 437 |
+
"save_steps": 500,
|
| 438 |
+
"stateful_callbacks": {
|
| 439 |
+
"TrainerControl": {
|
| 440 |
+
"args": {
|
| 441 |
+
"should_epoch_stop": false,
|
| 442 |
+
"should_evaluate": false,
|
| 443 |
+
"should_log": false,
|
| 444 |
+
"should_save": true,
|
| 445 |
+
"should_training_stop": true
|
| 446 |
+
},
|
| 447 |
+
"attributes": {}
|
| 448 |
+
}
|
| 449 |
+
},
|
| 450 |
+
"total_flos": 1.12138907332608e+16,
|
| 451 |
+
"train_batch_size": 2,
|
| 452 |
+
"trial_name": null,
|
| 453 |
+
"trial_params": null
|
| 454 |
+
}
|
outputs/checkpoint-60/training_args.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ccb4a00e355c969dd67e87e788f5cd22f28c07668e4d8f3457bdd344543ea888
|
| 3 |
+
size 5969
|
phase_1_ensemble.ipynb
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 1,
|
| 6 |
+
"id": "884cc4c7",
|
| 7 |
+
"metadata": {},
|
| 8 |
+
"outputs": [
|
| 9 |
+
{
|
| 10 |
+
"data": {
|
| 11 |
+
"text/html": [
|
| 12 |
+
"<div>\n",
|
| 13 |
+
"<style scoped>\n",
|
| 14 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 15 |
+
" vertical-align: middle;\n",
|
| 16 |
+
" }\n",
|
| 17 |
+
"\n",
|
| 18 |
+
" .dataframe tbody tr th {\n",
|
| 19 |
+
" vertical-align: top;\n",
|
| 20 |
+
" }\n",
|
| 21 |
+
"\n",
|
| 22 |
+
" .dataframe thead th {\n",
|
| 23 |
+
" text-align: right;\n",
|
| 24 |
+
" }\n",
|
| 25 |
+
"</style>\n",
|
| 26 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 27 |
+
" <thead>\n",
|
| 28 |
+
" <tr style=\"text-align: right;\">\n",
|
| 29 |
+
" <th></th>\n",
|
| 30 |
+
" <th>Input</th>\n",
|
| 31 |
+
" <th>Tags</th>\n",
|
| 32 |
+
" </tr>\n",
|
| 33 |
+
" </thead>\n",
|
| 34 |
+
" <tbody>\n",
|
| 35 |
+
" <tr>\n",
|
| 36 |
+
" <th>0</th>\n",
|
| 37 |
+
" <td>Title: What is the effective differencial effe...</td>\n",
|
| 38 |
+
" <td>['electronics']</td>\n",
|
| 39 |
+
" </tr>\n",
|
| 40 |
+
" <tr>\n",
|
| 41 |
+
" <th>1</th>\n",
|
| 42 |
+
" <td>Title: Heat sensor with fan cooling Body: Can ...</td>\n",
|
| 43 |
+
" <td>['electronics']</td>\n",
|
| 44 |
+
" </tr>\n",
|
| 45 |
+
" <tr>\n",
|
| 46 |
+
" <th>2</th>\n",
|
| 47 |
+
" <td>Title: Outlet Installation--more wires than my...</td>\n",
|
| 48 |
+
" <td>['electronics']</td>\n",
|
| 49 |
+
" </tr>\n",
|
| 50 |
+
" <tr>\n",
|
| 51 |
+
" <th>3</th>\n",
|
| 52 |
+
" <td>Title: Buck Converter Operation Question Body:...</td>\n",
|
| 53 |
+
" <td>['electronics']</td>\n",
|
| 54 |
+
" </tr>\n",
|
| 55 |
+
" <tr>\n",
|
| 56 |
+
" <th>4</th>\n",
|
| 57 |
+
" <td>Title: Urgent help in area of ASIC design, ver...</td>\n",
|
| 58 |
+
" <td>['electronics']</td>\n",
|
| 59 |
+
" </tr>\n",
|
| 60 |
+
" </tbody>\n",
|
| 61 |
+
"</table>\n",
|
| 62 |
+
"</div>"
|
| 63 |
+
],
|
| 64 |
+
"text/plain": [
|
| 65 |
+
" Input Tags\n",
|
| 66 |
+
"0 Title: What is the effective differencial effe... ['electronics']\n",
|
| 67 |
+
"1 Title: Heat sensor with fan cooling Body: Can ... ['electronics']\n",
|
| 68 |
+
"2 Title: Outlet Installation--more wires than my... ['electronics']\n",
|
| 69 |
+
"3 Title: Buck Converter Operation Question Body:... ['electronics']\n",
|
| 70 |
+
"4 Title: Urgent help in area of ASIC design, ver... ['electronics']"
|
| 71 |
+
]
|
| 72 |
+
},
|
| 73 |
+
"execution_count": 1,
|
| 74 |
+
"metadata": {},
|
| 75 |
+
"output_type": "execute_result"
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"source": [
|
| 79 |
+
"import pandas as pd\n",
|
| 80 |
+
"\n",
|
| 81 |
+
"dataset = pd.read_csv(\"/home/darth/#/SEQuestionClassifier/data/hackerank/combined_data.csv\")\n",
|
| 82 |
+
"dataset.head()"
|
| 83 |
+
]
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"cell_type": "code",
|
| 87 |
+
"execution_count": 2,
|
| 88 |
+
"id": "d9597ebd",
|
| 89 |
+
"metadata": {},
|
| 90 |
+
"outputs": [],
|
| 91 |
+
"source": [
|
| 92 |
+
"df = dataset"
|
| 93 |
+
]
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"cell_type": "code",
|
| 97 |
+
"execution_count": 5,
|
| 98 |
+
"id": "6626fbc7",
|
| 99 |
+
"metadata": {},
|
| 100 |
+
"outputs": [],
|
| 101 |
+
"source": [
|
| 102 |
+
"import ast\n",
|
| 103 |
+
"\n",
|
| 104 |
+
"def clean_tags(tag_string):\n",
|
| 105 |
+
" # Convert the string to a list\n",
|
| 106 |
+
" tag_list = ast.literal_eval(tag_string)\n",
|
| 107 |
+
" # Join the list into a comma-separated string\n",
|
| 108 |
+
" return ', '.join(tag_list)\n",
|
| 109 |
+
"\n",
|
| 110 |
+
"df['Tags'] = df['Tags'].apply(clean_tags)"
|
| 111 |
+
]
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"cell_type": "markdown",
|
| 115 |
+
"id": "5c32d48e",
|
| 116 |
+
"metadata": {},
|
| 117 |
+
"source": []
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"cell_type": "code",
|
| 121 |
+
"execution_count": null,
|
| 122 |
+
"id": "52801f74",
|
| 123 |
+
"metadata": {},
|
| 124 |
+
"outputs": [],
|
| 125 |
+
"source": [
|
| 126 |
+
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
|
| 127 |
+
"from sklearn.preprocessing import LabelEncoder\n",
|
| 128 |
+
"\n",
|
| 129 |
+
"def vectorirse_text(text):\n",
|
| 130 |
+
" \"\"\" Receives text as input and returns TF-IDF vectors\"\"\"\n",
|
| 131 |
+
" tfidf = TfidfVectorizer(max_features=500000)\n",
|
| 132 |
+
" X = tfidf.fit_transform(text)\n",
|
| 133 |
+
" return X\n",
|
| 134 |
+
"\n",
|
| 135 |
+
"def label_encoding(input):\n",
|
| 136 |
+
" label_encoder = LabelEncoder()\n",
|
| 137 |
+
" return label_encoder.fit_transform(input)\n",
|
| 138 |
+
"\n",
|
| 139 |
+
"\n",
|
| 140 |
+
"X = vectorirse_text(df['Input'])\n",
|
| 141 |
+
"y = label_encoding(df['Tags'])\n",
|
| 142 |
+
"\n",
|
| 143 |
+
"# Import necessary libraries\n",
|
| 144 |
+
"\n"
|
| 145 |
+
]
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"cell_type": "code",
|
| 149 |
+
"execution_count": null,
|
| 150 |
+
"id": "1380ee74",
|
| 151 |
+
"metadata": {},
|
| 152 |
+
"outputs": [],
|
| 153 |
+
"source": [
|
| 154 |
+
"import pandas as pd\n",
|
| 155 |
+
"from sklearn.linear_model import LogisticRegression\n",
|
| 156 |
+
"from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
|
| 157 |
+
"\n",
|
| 158 |
+
"# Assuming df is already loaded\n",
|
| 159 |
+
"# And the following functions are available from your preprocessing notebook:\n",
|
| 160 |
+
"# - vectorise_text\n",
|
| 161 |
+
"# - label_encoding\n",
|
| 162 |
+
"\n",
|
| 163 |
+
"# Step 1: Preprocess\n",
|
| 164 |
+
"\n",
|
| 165 |
+
"# Step 2: Train Logistic Regression\n",
|
| 166 |
+
"model = LogisticRegression(max_iter=1000)\n",
|
| 167 |
+
"model.fit(X, y)\n",
|
| 168 |
+
"\n",
|
| 169 |
+
"# Step 3: Predict on the entire dataset\n",
|
| 170 |
+
"y_preds = model.predict(X)\n",
|
| 171 |
+
"\n",
|
| 172 |
+
"# Step 4: Evaluate\n",
|
| 173 |
+
"acc = accuracy_score(y, y_preds)\n",
|
| 174 |
+
"print(f\"✅ Accuracy: {acc:.4f}\\n\")\n",
|
| 175 |
+
"\n",
|
| 176 |
+
"print(\"✅ Classification Report:\\n\")\n",
|
| 177 |
+
"print(classification_report(y, y_preds))\n",
|
| 178 |
+
"\n",
|
| 179 |
+
"print(\"✅ Confusion Matrix:\\n\")\n",
|
| 180 |
+
"print(confusion_matrix(y, y_preds))\n",
|
| 181 |
+
"\n",
|
| 182 |
+
"# Step 5: Save results to a CSV\n",
|
| 183 |
+
"output_df = pd.DataFrame({\n",
|
| 184 |
+
" 'y_true': y,\n",
|
| 185 |
+
" 'y_pred': y_preds\n",
|
| 186 |
+
"})\n",
|
| 187 |
+
"\n",
|
| 188 |
+
"output_df.to_csv('part-1.csv', index=False)\n",
|
| 189 |
+
"\n",
|
| 190 |
+
"print(\"\\n✅ part-1.csv saved successfully!\")\n",
|
| 191 |
+
"\n"
|
| 192 |
+
]
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"cell_type": "code",
|
| 196 |
+
"execution_count": null,
|
| 197 |
+
"id": "79783f66",
|
| 198 |
+
"metadata": {},
|
| 199 |
+
"outputs": [
|
| 200 |
+
{
|
| 201 |
+
"data": {
|
| 202 |
+
"text/plain": [
|
| 203 |
+
"np.int64(2)"
|
| 204 |
+
]
|
| 205 |
+
},
|
| 206 |
+
"execution_count": 7,
|
| 207 |
+
"metadata": {},
|
| 208 |
+
"output_type": "execute_result"
|
| 209 |
+
}
|
| 210 |
+
],
|
| 211 |
+
"source": []
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"cell_type": "code",
|
| 215 |
+
"execution_count": null,
|
| 216 |
+
"id": "80fb05a1",
|
| 217 |
+
"metadata": {},
|
| 218 |
+
"outputs": [],
|
| 219 |
+
"source": []
|
| 220 |
+
}
|
| 221 |
+
],
|
| 222 |
+
"metadata": {
|
| 223 |
+
"kernelspec": {
|
| 224 |
+
"display_name": "major02",
|
| 225 |
+
"language": "python",
|
| 226 |
+
"name": "python3"
|
| 227 |
+
},
|
| 228 |
+
"language_info": {
|
| 229 |
+
"codemirror_mode": {
|
| 230 |
+
"name": "ipython",
|
| 231 |
+
"version": 3
|
| 232 |
+
},
|
| 233 |
+
"file_extension": ".py",
|
| 234 |
+
"mimetype": "text/x-python",
|
| 235 |
+
"name": "python",
|
| 236 |
+
"nbconvert_exporter": "python",
|
| 237 |
+
"pygments_lexer": "ipython3",
|
| 238 |
+
"version": "3.10.12"
|
| 239 |
+
}
|
| 240 |
+
},
|
| 241 |
+
"nbformat": 4,
|
| 242 |
+
"nbformat_minor": 5
|
| 243 |
+
}
|
preprocessing.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
problem.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b3282c4f77bab6b07d32ad5c4b08b26f01562730af7746fa199c2bbf824f3719
|
| 3 |
+
size 131219
|
requirements.txt
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
accelerate==1.6.0
|
| 2 |
+
aiohappyeyeballs==2.6.1
|
| 3 |
+
aiohttp==3.11.18
|
| 4 |
+
aiosignal==1.3.2
|
| 5 |
+
asttokens==3.0.0
|
| 6 |
+
async-timeout==5.0.1
|
| 7 |
+
attrs==25.3.0
|
| 8 |
+
bitsandbytes==0.45.5
|
| 9 |
+
certifi==2025.4.26
|
| 10 |
+
charset-normalizer==3.4.1
|
| 11 |
+
comm==0.2.2
|
| 12 |
+
cut-cross-entropy==25.1.1
|
| 13 |
+
datasets==3.5.0
|
| 14 |
+
debugpy==1.8.14
|
| 15 |
+
decorator==5.2.1
|
| 16 |
+
dill==0.3.8
|
| 17 |
+
exceptiongroup==1.2.2
|
| 18 |
+
executing==2.2.0
|
| 19 |
+
filelock==3.18.0
|
| 20 |
+
frozenlist==1.6.0
|
| 21 |
+
fsspec==2024.12.0
|
| 22 |
+
hf_transfer==0.1.9
|
| 23 |
+
huggingface-hub==0.30.2
|
| 24 |
+
idna==3.10
|
| 25 |
+
ipykernel==6.29.5
|
| 26 |
+
ipython==8.36.0
|
| 27 |
+
jedi==0.19.2
|
| 28 |
+
Jinja2==3.1.6
|
| 29 |
+
joblib==1.4.2
|
| 30 |
+
jupyter_client==8.6.3
|
| 31 |
+
jupyter_core==5.7.2
|
| 32 |
+
markdown-it-py==3.0.0
|
| 33 |
+
MarkupSafe==3.0.2
|
| 34 |
+
matplotlib-inline==0.1.7
|
| 35 |
+
mdurl==0.1.2
|
| 36 |
+
mpmath==1.3.0
|
| 37 |
+
multidict==6.4.3
|
| 38 |
+
multiprocess==0.70.16
|
| 39 |
+
nest-asyncio==1.6.0
|
| 40 |
+
networkx==3.4.2
|
| 41 |
+
numpy==2.0.2
|
| 42 |
+
nvidia-cublas-cu12==12.6.4.1
|
| 43 |
+
nvidia-cuda-cupti-cu12==12.6.80
|
| 44 |
+
nvidia-cuda-nvrtc-cu12==12.6.77
|
| 45 |
+
nvidia-cuda-runtime-cu12==12.6.77
|
| 46 |
+
nvidia-cudnn-cu12==9.5.1.17
|
| 47 |
+
nvidia-cufft-cu12==11.3.0.4
|
| 48 |
+
nvidia-cufile-cu12==1.11.1.6
|
| 49 |
+
nvidia-curand-cu12==10.3.7.77
|
| 50 |
+
nvidia-cusolver-cu12==11.7.1.2
|
| 51 |
+
nvidia-cusparse-cu12==12.5.4.2
|
| 52 |
+
nvidia-cusparselt-cu12==0.6.3
|
| 53 |
+
nvidia-nccl-cu12==2.26.2
|
| 54 |
+
nvidia-nvjitlink-cu12==12.6.85
|
| 55 |
+
nvidia-nvtx-cu12==12.6.77
|
| 56 |
+
packaging==25.0
|
| 57 |
+
pandas==2.2.3
|
| 58 |
+
parso==0.8.4
|
| 59 |
+
peft==0.15.2
|
| 60 |
+
pexpect==4.9.0
|
| 61 |
+
pillow==11.2.1
|
| 62 |
+
platformdirs==4.3.7
|
| 63 |
+
prompt_toolkit==3.0.51
|
| 64 |
+
propcache==0.3.1
|
| 65 |
+
protobuf==6.30.2
|
| 66 |
+
psutil==7.0.0
|
| 67 |
+
ptyprocess==0.7.0
|
| 68 |
+
pure_eval==0.2.3
|
| 69 |
+
pyarrow==20.0.0
|
| 70 |
+
Pygments==2.19.1
|
| 71 |
+
python-dateutil==2.9.0.post0
|
| 72 |
+
pytz==2025.2
|
| 73 |
+
PyYAML==6.0.2
|
| 74 |
+
pyzmq==26.4.0
|
| 75 |
+
regex==2024.11.6
|
| 76 |
+
requests==2.32.3
|
| 77 |
+
rich==14.0.0
|
| 78 |
+
safetensors==0.5.3
|
| 79 |
+
scikit-learn==1.6.1
|
| 80 |
+
scipy==1.15.2
|
| 81 |
+
sentencepiece==0.2.0
|
| 82 |
+
six==1.17.0
|
| 83 |
+
stack-data==0.6.3
|
| 84 |
+
sympy==1.14.0
|
| 85 |
+
threadpoolctl==3.6.0
|
| 86 |
+
tokenizers==0.21.1
|
| 87 |
+
torch==2.7.0
|
| 88 |
+
tornado==6.4.2
|
| 89 |
+
tqdm==4.67.1
|
| 90 |
+
traitlets==5.14.3
|
| 91 |
+
transformers==4.51.3
|
| 92 |
+
triton==3.3.0
|
| 93 |
+
trl==0.15.2
|
| 94 |
+
typing_extensions==4.13.2
|
| 95 |
+
tzdata==2025.2
|
| 96 |
+
unsloth==2025.4.1
|
| 97 |
+
unsloth_zoo==2025.4.1
|
| 98 |
+
urllib3==2.4.0
|
| 99 |
+
wcwidth==0.2.13
|
| 100 |
+
xformers==0.0.29.post3
|
| 101 |
+
xxhash==3.5.0
|
| 102 |
+
yarl==1.20.0
|
unsloth-requirements.txt
ADDED
|
File without changes
|
unsloth_compiled_cache/UnslothBCOTrainer.py
ADDED
|
@@ -0,0 +1,1818 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.bco_trainer import (Any, AutoModelForCausalLM, BCOConfig, BCOTrainer, BaseImageProcessor, CLF_NAME, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, Literal, LogisticRegression, Optional, PartialState, PeftModel, PreTrainedModel, PreTrainedModelWrapper, PreTrainedTokenizerBase, ProcessorMixin, RUNNING_NAME, RunningMoments, SequentialSampler, Trainer, TrainerCallback, TrainingArguments, Union, _process_tokens, _tokenize, amp, contextmanager, create_reference_model, deepcopy, defaultdict, disable_dropout_in_model, generate_model_card, get_comet_experiment_url, has_length, inspect, is_comet_available, is_peft_available, is_sklearn_available, is_wandb_available, itemgetter, log_table_to_comet_experiment, maybe_apply_chat_template, nn, np, nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_model_for_kbit_training, random, textwrap, torch, tqdm, transformers, version, warnings, F, Optional, PeftModel, PreTrainedModel, Trainer, is_peft_available, os, torch)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
| 25 |
+
torch_compile_options = {
|
| 26 |
+
"epilogue_fusion" : True,
|
| 27 |
+
"max_autotune" : False,
|
| 28 |
+
"shape_padding" : True,
|
| 29 |
+
"trace.enabled" : False,
|
| 30 |
+
"triton.cudagraphs" : False,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
def selective_log_softmax(logits, index):
    """Return the log-softmax probability of the tokens selected by ``index``.

    Equivalent to ``logits.log_softmax(-1).gather(-1, index.unsqueeze(-1)).squeeze(-1)``
    but computed as ``x_i - logsumexp(x)`` so the full log-softmax tensor is never
    materialised.

    Args:
        logits: float tensor whose last dimension is the vocabulary;
            assumed shape ``(..., vocab_size)`` — TODO confirm against callers.
        index: integer tensor of shape ``(...)`` indexing the last dim of ``logits``.

    Returns:
        Float32 tensor of shape ``(...)`` with the per-token log-probabilities.
    """
    # Upcast so gather/logsumexp are numerically stable under bf16/fp16 inputs.
    logits = logits.to(torch.float32)
    selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
    # loop to reduce peak mem consumption
    # logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
    logsumexp_values = torch.logsumexp(logits, dim = -1)
    per_token_logps = selected_logits - logsumexp_values  # log_softmax(x_i) = x_i - logsumexp(x)
    return per_token_logps
|
| 42 |
+
@dataclass
class UnslothBCOConfig(BCOConfig):
    """

    Configuration class for the [`BCOTrainer`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the deviation from the reference model. Higher β means less deviation from the
            reference model.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model and reference model.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from both the model and the reference model to W&B or Comet during
            evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
            Whether to precompute reference model log probabilities for training and evaluation datasets. This is
            useful when training without the reference model to reduce the total GPU memory needed.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        ref_model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
            from a string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        prompt_sample_size (`int`, *optional*, defaults to `1024`):
            Number of prompts that are fed to density ratio classifier.
        min_density_ratio (`float`, *optional*, defaults to `0.5`):
            Minimum value of the density ratio. The estimated density ratio is clamped to this value.
        max_density_ratio (`float`, *optional*, defaults to `10.0`):
            Maximum value of the density ratio. The estimated density ratio is clamped to this value.

    """
    # Unsloth-specific extras not present on the upstream BCOConfig.
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    def __init__(
        self,
        output_dir = None,
        overwrite_output_dir = None,
        do_train = False,
        do_eval = False,
        do_predict = False,
        eval_strategy = 'no',
        prediction_loss_only = False,
        per_device_train_batch_size = 4,
        per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None,
        per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2,
        eval_accumulation_steps = 2,
        eval_delay = 0,
        torch_empty_cache_steps = 250,
        learning_rate = 5e-05,
        weight_decay = 0.01,
        adam_beta1 = 0.9,
        adam_beta2 = 0.999,
        adam_epsilon = 1e-08,
        max_grad_norm = 1.0,
        num_train_epochs = 3.0,
        max_steps = -1,
        lr_scheduler_type = 'linear',
        warmup_ratio = 0.1,
        warmup_steps = 0,
        log_level = 'passive',
        log_level_replica = 'warning',
        log_on_each_node = True,
        logging_dir = None,
        logging_strategy = 'steps',
        logging_first_step = False,
        logging_steps = 1,
        logging_nan_inf_filter = False,
        save_strategy = 'steps',
        save_steps = 500,
        save_total_limit = None,
        save_safetensors = True,
        save_on_each_node = False,
        save_only_model = False,
        restore_callback_states_from_checkpoint = False,
        no_cuda = False,
        use_cpu = False,
        use_mps_device = False,
        seed = 3407,
        data_seed = 3407,
        jit_mode_eval = False,
        use_ipex = False,
        bf16 = False,
        fp16 = False,
        fp16_opt_level = 'O1',
        half_precision_backend = 'auto',
        bf16_full_eval = False,
        fp16_full_eval = False,
        tf32 = None,
        local_rank = -1,
        ddp_backend = None,
        tpu_num_cores = None,
        tpu_metrics_debug = False,
        debug = '',
        dataloader_drop_last = False,
        eval_steps = None,
        dataloader_num_workers = 0,
        dataloader_prefetch_factor = None,
        past_index = -1,
        run_name = None,
        disable_tqdm = None,
        remove_unused_columns = True,
        label_names = None,
        load_best_model_at_end = False,
        metric_for_best_model = None,
        greater_is_better = None,
        ignore_data_skip = False,
        fsdp = '',
        fsdp_min_num_params = 0,
        fsdp_config = None,
        tp_size = 0,
        fsdp_transformer_layer_cls_to_wrap = None,
        accelerator_config = None,
        deepspeed = None,
        label_smoothing_factor = 0.0,
        optim = 'adamw_8bit',
        optim_args = None,
        adafactor = False,
        group_by_length = False,
        length_column_name = 'length',
        report_to = None,
        ddp_find_unused_parameters = None,
        ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None,
        dataloader_pin_memory = True,
        dataloader_persistent_workers = False,
        skip_memory_metrics = True,
        use_legacy_prediction_loop = False,
        push_to_hub = False,
        resume_from_checkpoint = None,
        hub_model_id = None,
        hub_strategy = 'every_save',
        hub_token = None,
        hub_private_repo = None,
        hub_always_push = False,
        gradient_checkpointing = False,
        gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False,
        eval_do_concat_batches = True,
        fp16_backend = 'auto',
        push_to_hub_model_id = None,
        push_to_hub_organization = None,
        push_to_hub_token = None,
        mp_parameters = '',
        auto_find_batch_size = False,
        full_determinism = False,
        torchdynamo = None,
        ray_scope = 'last',
        ddp_timeout = 1800,
        torch_compile = False,
        torch_compile_backend = None,
        torch_compile_mode = None,
        include_tokens_per_second = False,
        include_num_input_tokens_seen = False,
        neftune_noise_alpha = None,
        optim_target_modules = None,
        batch_eval_metrics = False,
        eval_on_start = False,
        use_liger_kernel = False,
        eval_use_gather_object = False,
        average_tokens_across_devices = False,
        max_length = 1024,
        max_prompt_length = 512,
        max_completion_length = None,
        beta = 0.1,
        label_pad_token_id = -100,
        padding_value = None,
        truncation_mode = 'keep_end',
        disable_dropout = True,
        generate_during_eval = False,
        is_encoder_decoder = None,
        precompute_ref_log_probs = False,
        model_init_kwargs = None,
        ref_model_init_kwargs = None,
        dataset_num_proc = None,
        prompt_sample_size = 1024,
        min_density_ratio = 0.5,
        max_density_ratio = 10.0,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        **kwargs,
    ):
        # Guard against learning rates that would make gradient updates vanish
        # or explode. FIX: the "> 1" error message previously read
        # "is way too larger > 1!" — corrected the grammar; behavior unchanged.
        if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        # If the user left output/save settings at their defaults, redirect
        # checkpoints to a local folder and disable periodic saving.
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        # Default dataset preprocessing parallelism to all available cores.
        if dataset_num_proc is None:
            from multiprocessing import cpu_count
            dataset_num_proc = cpu_count()

        # Forward everything to the upstream BCOConfig unchanged.
        super().__init__(
            output_dir = output_dir,
            overwrite_output_dir = overwrite_output_dir,
            do_train = do_train,
            do_eval = do_eval,
            do_predict = do_predict,
            eval_strategy = eval_strategy,
            prediction_loss_only = prediction_loss_only,
            per_device_train_batch_size = per_device_train_batch_size,
            per_device_eval_batch_size = per_device_eval_batch_size,
            per_gpu_train_batch_size = per_gpu_train_batch_size,
            per_gpu_eval_batch_size = per_gpu_eval_batch_size,
            gradient_accumulation_steps = gradient_accumulation_steps,
            eval_accumulation_steps = eval_accumulation_steps,
            eval_delay = eval_delay,
            torch_empty_cache_steps = torch_empty_cache_steps,
            learning_rate = learning_rate,
            weight_decay = weight_decay,
            adam_beta1 = adam_beta1,
            adam_beta2 = adam_beta2,
            adam_epsilon = adam_epsilon,
            max_grad_norm = max_grad_norm,
            num_train_epochs = num_train_epochs,
            max_steps = max_steps,
            lr_scheduler_type = lr_scheduler_type,
            warmup_ratio = warmup_ratio,
            warmup_steps = warmup_steps,
            log_level = log_level,
            log_level_replica = log_level_replica,
            log_on_each_node = log_on_each_node,
            logging_dir = logging_dir,
            logging_strategy = logging_strategy,
            logging_first_step = logging_first_step,
            logging_steps = logging_steps,
            logging_nan_inf_filter = logging_nan_inf_filter,
            save_strategy = save_strategy,
            save_steps = save_steps,
            save_total_limit = save_total_limit,
            save_safetensors = save_safetensors,
            save_on_each_node = save_on_each_node,
            save_only_model = save_only_model,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            no_cuda = no_cuda,
            use_cpu = use_cpu,
            use_mps_device = use_mps_device,
            seed = seed,
            data_seed = data_seed,
            jit_mode_eval = jit_mode_eval,
            use_ipex = use_ipex,
            bf16 = bf16,
            fp16 = fp16,
            fp16_opt_level = fp16_opt_level,
            half_precision_backend = half_precision_backend,
            bf16_full_eval = bf16_full_eval,
            fp16_full_eval = fp16_full_eval,
            tf32 = tf32,
            local_rank = local_rank,
            ddp_backend = ddp_backend,
            tpu_num_cores = tpu_num_cores,
            tpu_metrics_debug = tpu_metrics_debug,
            debug = debug,
            dataloader_drop_last = dataloader_drop_last,
            eval_steps = eval_steps,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            past_index = past_index,
            run_name = run_name,
            disable_tqdm = disable_tqdm,
            remove_unused_columns = remove_unused_columns,
            label_names = label_names,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model,
            greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            fsdp = fsdp,
            fsdp_min_num_params = fsdp_min_num_params,
            fsdp_config = fsdp_config,
            tp_size = tp_size,
            fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
            accelerator_config = accelerator_config,
            deepspeed = deepspeed,
            label_smoothing_factor = label_smoothing_factor,
            optim = optim,
            optim_args = optim_args,
            adafactor = adafactor,
            group_by_length = group_by_length,
            length_column_name = length_column_name,
            report_to = report_to,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb,
            ddp_broadcast_buffers = ddp_broadcast_buffers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            skip_memory_metrics = skip_memory_metrics,
            use_legacy_prediction_loop = use_legacy_prediction_loop,
            push_to_hub = push_to_hub,
            resume_from_checkpoint = resume_from_checkpoint,
            hub_model_id = hub_model_id,
            hub_strategy = hub_strategy,
            hub_token = hub_token,
            hub_private_repo = hub_private_repo,
            hub_always_push = hub_always_push,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            include_inputs_for_metrics = include_inputs_for_metrics,
            eval_do_concat_batches = eval_do_concat_batches,
            fp16_backend = fp16_backend,
            push_to_hub_model_id = push_to_hub_model_id,
            push_to_hub_organization = push_to_hub_organization,
            push_to_hub_token = push_to_hub_token,
            mp_parameters = mp_parameters,
            auto_find_batch_size = auto_find_batch_size,
            full_determinism = full_determinism,
            torchdynamo = torchdynamo,
            ray_scope = ray_scope,
            ddp_timeout = ddp_timeout,
            torch_compile = torch_compile,
            torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            include_tokens_per_second = include_tokens_per_second,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            neftune_noise_alpha = neftune_noise_alpha,
            optim_target_modules = optim_target_modules,
            batch_eval_metrics = batch_eval_metrics,
            eval_on_start = eval_on_start,
            use_liger_kernel = use_liger_kernel,
            eval_use_gather_object = eval_use_gather_object,
            average_tokens_across_devices = average_tokens_across_devices,
            max_length = max_length,
            max_prompt_length = max_prompt_length,
            max_completion_length = max_completion_length,
            beta = beta,
            label_pad_token_id = label_pad_token_id,
            padding_value = padding_value,
            truncation_mode = truncation_mode,
            disable_dropout = disable_dropout,
            generate_during_eval = generate_during_eval,
            is_encoder_decoder = is_encoder_decoder,
            precompute_ref_log_probs = precompute_ref_log_probs,
            model_init_kwargs = model_init_kwargs,
            ref_model_init_kwargs = ref_model_init_kwargs,
            dataset_num_proc = dataset_num_proc,
            prompt_sample_size = prompt_sample_size,
            min_density_ratio = min_density_ratio,
            max_density_ratio = max_density_ratio,
            **kwargs,
        )
        # Stash the Unsloth-only options on the instance (not handled upstream).
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
pass
|
| 413 |
+
|
| 414 |
+
class _UnslothBCOTrainer(Trainer):
|
| 415 |
+
r""""""
|
| 416 |
+
|
| 417 |
+
_tag_names = ["trl", "bco"]
|
| 418 |
+
|
| 419 |
+
def __init__(
|
| 420 |
+
self,
|
| 421 |
+
model: Union[PreTrainedModel, nn.Module, str] = None,
|
| 422 |
+
ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
|
| 423 |
+
args: BCOConfig = None,
|
| 424 |
+
train_dataset: Optional[Dataset] = None,
|
| 425 |
+
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
|
| 426 |
+
processing_class: Optional[
|
| 427 |
+
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
|
| 428 |
+
] = None,
|
| 429 |
+
data_collator: Optional[DataCollator] = None,
|
| 430 |
+
model_init: Optional[Callable[[], PreTrainedModel]] = None,
|
| 431 |
+
callbacks: Optional[list[TrainerCallback]] = None,
|
| 432 |
+
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
|
| 433 |
+
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
|
| 434 |
+
peft_config: Optional[dict] = None,
|
| 435 |
+
compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
|
| 436 |
+
model_adapter_name: Optional[str] = None,
|
| 437 |
+
ref_adapter_name: Optional[str] = None,
|
| 438 |
+
embedding_func: Optional[Callable] = None,
|
| 439 |
+
embedding_tokenizer: Optional[PreTrainedTokenizerBase] = None,
|
| 440 |
+
):
|
| 441 |
+
if not is_sklearn_available():
|
| 442 |
+
raise ImportError(
|
| 443 |
+
"BCOTrainer requires the scikit-learn library. Please install it with `pip install scikit-learn`."
|
| 444 |
+
)
|
| 445 |
+
|
| 446 |
+
if type(args) is TrainingArguments:
|
| 447 |
+
raise ValueError("Please use `BCOConfig` instead `TrainingArguments`.")
|
| 448 |
+
|
| 449 |
+
if not isinstance(model, str) and ref_model is model:
|
| 450 |
+
raise ValueError(
|
| 451 |
+
"`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
|
| 452 |
+
"same as `model`, you must mass a copy of it, or `None` if you use peft."
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
if args.model_init_kwargs is None:
|
| 456 |
+
model_init_kwargs = {}
|
| 457 |
+
elif not isinstance(model, str):
|
| 458 |
+
raise ValueError("You passed model_kwargs to the BCOTrainer. But your model is already instantiated.")
|
| 459 |
+
else:
|
| 460 |
+
model_init_kwargs = args.model_init_kwargs
|
| 461 |
+
torch_dtype = model_init_kwargs.get("torch_dtype")
|
| 462 |
+
if torch_dtype is not None:
|
| 463 |
+
# Convert to `torch.dtype` if an str is passed
|
| 464 |
+
if isinstance(torch_dtype, str) and torch_dtype != "auto":
|
| 465 |
+
torch_dtype = getattr(torch, torch_dtype)
|
| 466 |
+
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
|
| 467 |
+
raise ValueError(
|
| 468 |
+
f"Invalid `torch_dtype` passed to the BCOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
|
| 469 |
+
)
|
| 470 |
+
model_init_kwargs["torch_dtype"] = torch_dtype
|
| 471 |
+
|
| 472 |
+
if args.ref_model_init_kwargs is None:
|
| 473 |
+
ref_model_init_kwargs = {}
|
| 474 |
+
elif not isinstance(ref_model, str):
|
| 475 |
+
raise ValueError(
|
| 476 |
+
"You passed ref_model_kwargs to the BCOTrainer. But your ref_model is already instantiated."
|
| 477 |
+
)
|
| 478 |
+
else:
|
| 479 |
+
ref_model_init_kwargs = args.ref_model_init_kwargs
|
| 480 |
+
torch_dtype = ref_model_init_kwargs.get("torch_dtype")
|
| 481 |
+
if torch_dtype is not None:
|
| 482 |
+
# Convert to `torch.dtype` if an str is passed
|
| 483 |
+
if isinstance(torch_dtype, str) and torch_dtype != "auto":
|
| 484 |
+
torch_dtype = getattr(torch, torch_dtype)
|
| 485 |
+
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
|
| 486 |
+
raise ValueError(
|
| 487 |
+
f"Invalid `torch_dtype` passed to the BCOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
|
| 488 |
+
)
|
| 489 |
+
ref_model_init_kwargs["torch_dtype"] = torch_dtype
|
| 490 |
+
|
| 491 |
+
if isinstance(model, str):
|
| 492 |
+
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
|
| 493 |
+
|
| 494 |
+
if isinstance(ref_model, str):
|
| 495 |
+
ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)
|
| 496 |
+
|
| 497 |
+
# Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
|
| 498 |
+
# has been called in order to properly call autocast if needed.
|
| 499 |
+
self._peft_has_been_casted_to_bf16 = False
|
| 500 |
+
|
| 501 |
+
if not is_peft_available() and peft_config is not None:
|
| 502 |
+
raise ValueError(
|
| 503 |
+
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it with `pip install peft` to use the PEFT models"
|
| 504 |
+
)
|
| 505 |
+
elif is_peft_available() and peft_config is not None:
|
| 506 |
+
# if model is a peft model and we have a peft_config, we merge and unload it first
|
| 507 |
+
if isinstance(model, PeftModel):
|
| 508 |
+
model = model.merge_and_unload()
|
| 509 |
+
|
| 510 |
+
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
|
| 511 |
+
_support_gc_kwargs = hasattr(
|
| 512 |
+
args, "gradient_checkpointing_kwargs"
|
| 513 |
+
) and "gradient_checkpointing_kwargs" in list(
|
| 514 |
+
inspect.signature(prepare_model_for_kbit_training).parameters
|
| 515 |
+
)
|
| 516 |
+
|
| 517 |
+
prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
|
| 518 |
+
|
| 519 |
+
if _support_gc_kwargs:
|
| 520 |
+
prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
|
| 521 |
+
|
| 522 |
+
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
|
| 523 |
+
elif getattr(args, "gradient_checkpointing", False):
|
| 524 |
+
# For backward compatibility with older versions of transformers
|
| 525 |
+
if hasattr(model, "enable_input_require_grads"):
|
| 526 |
+
model.enable_input_require_grads()
|
| 527 |
+
else:
|
| 528 |
+
|
| 529 |
+
def make_inputs_require_grad(module, input, output):
|
| 530 |
+
output.requires_grad_(True)
|
| 531 |
+
|
| 532 |
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
| 533 |
+
|
| 534 |
+
# get peft model with the given config
|
| 535 |
+
model = model
|
| 536 |
+
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
|
| 537 |
+
peft_module_casting_to_bf16(model)
|
| 538 |
+
# If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
|
| 539 |
+
self._peft_has_been_casted_to_bf16 = True
|
| 540 |
+
|
| 541 |
+
# For models that use gradient_checkpointing, we need to attach a hook that enables input
|
| 542 |
+
# to explicitly have `requires_grad=True`, otherwise training will either silently
|
| 543 |
+
# fail or completely fail.
|
| 544 |
+
elif getattr(args, "gradient_checkpointing", False):
|
| 545 |
+
# For backward compatibility with older versions of transformers
|
| 546 |
+
if hasattr(model, "enable_input_require_grads"):
|
| 547 |
+
model.enable_input_require_grads()
|
| 548 |
+
else:
|
| 549 |
+
|
| 550 |
+
def make_inputs_require_grad(module, input, output):
|
| 551 |
+
output.requires_grad_(True)
|
| 552 |
+
|
| 553 |
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
| 554 |
+
|
| 555 |
+
if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
|
| 556 |
+
raise ValueError(
|
| 557 |
+
"`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
|
| 558 |
+
" Please install `wandb` or `comet-ml` to resolve."
|
| 559 |
+
)
|
| 560 |
+
|
| 561 |
+
if model is not None:
|
| 562 |
+
self.is_encoder_decoder = model.config.is_encoder_decoder
|
| 563 |
+
elif args.is_encoder_decoder is None:
|
| 564 |
+
raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
|
| 565 |
+
else:
|
| 566 |
+
self.is_encoder_decoder = args.is_encoder_decoder
|
| 567 |
+
|
| 568 |
+
self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
|
| 569 |
+
self.model_adapter_name = model_adapter_name
|
| 570 |
+
self.ref_adapter_name = ref_adapter_name
|
| 571 |
+
|
| 572 |
+
if ref_model:
|
| 573 |
+
self.ref_model = ref_model
|
| 574 |
+
elif self.is_peft_model or args.precompute_ref_log_probs:
|
| 575 |
+
# The `model` with adapters turned off will be used as the reference model
|
| 576 |
+
self.ref_model = None
|
| 577 |
+
else:
|
| 578 |
+
self.ref_model = create_reference_model(model)
|
| 579 |
+
|
| 580 |
+
if processing_class is None:
|
| 581 |
+
raise ValueError(
|
| 582 |
+
"max_length or a processing_class must be specified when using the default DPODataCollatorWithPadding"
|
| 583 |
+
)
|
| 584 |
+
if args.max_length is None:
|
| 585 |
+
warnings.warn(
|
| 586 |
+
"When using DPODataCollatorWithPadding, you should set `max_length` in the `BCOConfig`. "
|
| 587 |
+
"It will be set to `512` by default, but you should do it yourself in the future.",
|
| 588 |
+
UserWarning,
|
| 589 |
+
)
|
| 590 |
+
max_length = 512
|
| 591 |
+
if args.max_length is not None:
|
| 592 |
+
max_length = args.max_length
|
| 593 |
+
|
| 594 |
+
if args.max_prompt_length is None:
|
| 595 |
+
warnings.warn(
|
| 596 |
+
"When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the `BCOConfig`. "
|
| 597 |
+
"It will be set to `128` by default, but you should do it yourself in the future.",
|
| 598 |
+
UserWarning,
|
| 599 |
+
)
|
| 600 |
+
max_prompt_length = 128
|
| 601 |
+
if args.max_prompt_length is not None:
|
| 602 |
+
max_prompt_length = args.max_prompt_length
|
| 603 |
+
|
| 604 |
+
max_completion_length = None
|
| 605 |
+
if args.max_completion_length is None and self.is_encoder_decoder:
|
| 606 |
+
warnings.warn(
|
| 607 |
+
"When using DPODataCollatorWithPadding with an encoder decoder architecture, you should set `max_completion_length` in the BCOTrainer's init"
|
| 608 |
+
" it will be set to `128` by default, but you should do it yourself in the future.",
|
| 609 |
+
UserWarning,
|
| 610 |
+
)
|
| 611 |
+
max_completion_length = 128
|
| 612 |
+
if args.max_completion_length is not None and self.is_encoder_decoder:
|
| 613 |
+
max_completion_length = args.max_completion_length
|
| 614 |
+
|
| 615 |
+
if data_collator is None:
|
| 616 |
+
data_collator = DPODataCollatorWithPadding(
|
| 617 |
+
pad_token_id=processing_class.pad_token_id,
|
| 618 |
+
label_pad_token_id=args.label_pad_token_id,
|
| 619 |
+
is_encoder_decoder=self.is_encoder_decoder,
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
if args.remove_unused_columns:
|
| 623 |
+
args.remove_unused_columns = False
|
| 624 |
+
# warn users
|
| 625 |
+
warnings.warn(
|
| 626 |
+
"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your BCOConfig"
|
| 627 |
+
" we have set it for you, but you should do it yourself in the future.",
|
| 628 |
+
UserWarning,
|
| 629 |
+
)
|
| 630 |
+
|
| 631 |
+
self.use_dpo_data_collator = True
|
| 632 |
+
else:
|
| 633 |
+
self.use_dpo_data_collator = False
|
| 634 |
+
|
| 635 |
+
# Disable dropout in the model and reference model
|
| 636 |
+
if args.disable_dropout:
|
| 637 |
+
disable_dropout_in_model(model)
|
| 638 |
+
if self.ref_model is not None:
|
| 639 |
+
disable_dropout_in_model(self.ref_model)
|
| 640 |
+
|
| 641 |
+
self.max_length = max_length
|
| 642 |
+
self.generate_during_eval = args.generate_during_eval
|
| 643 |
+
self.label_pad_token_id = args.label_pad_token_id
|
| 644 |
+
self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
|
| 645 |
+
self.max_prompt_length = max_prompt_length
|
| 646 |
+
self.truncation_mode = args.truncation_mode
|
| 647 |
+
self.max_completion_length = max_completion_length
|
| 648 |
+
self.precompute_ref_log_probs = args.precompute_ref_log_probs
|
| 649 |
+
|
| 650 |
+
# Since ref_logs are precomputed on the first call to get_train/eval_dataloader
|
| 651 |
+
# keep track of first called to avoid computation of future calls
|
| 652 |
+
self._precomputed_train_ref_log_probs = False
|
| 653 |
+
self._precomputed_eval_ref_log_probs = False
|
| 654 |
+
|
| 655 |
+
# metric
|
| 656 |
+
self._stored_metrics = defaultdict(lambda: defaultdict(list))
|
| 657 |
+
|
| 658 |
+
# BCO parameter
|
| 659 |
+
self.beta = args.beta
|
| 660 |
+
self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
|
| 661 |
+
self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
|
| 662 |
+
if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
|
| 663 |
+
warnings.warn(
|
| 664 |
+
"You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
|
| 665 |
+
"`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
|
| 666 |
+
"greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
|
| 667 |
+
"loss.",
|
| 668 |
+
UserWarning,
|
| 669 |
+
)
|
| 670 |
+
|
| 671 |
+
# Underlying Distribution Matching argument
|
| 672 |
+
self.embedding_func = embedding_func
|
| 673 |
+
self.embedding_tokenizer = embedding_tokenizer
|
| 674 |
+
|
| 675 |
+
# The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
|
| 676 |
+
# input tensor associated with the key "input_ids". However, in BCO, the sampled data does not include the
|
| 677 |
+
# "input_ids" key. Instead, the available keys are "prompt_input_ids" and "completion_input_ids". As a result,
|
| 678 |
+
# the trainer issues the warning: "Could not estimate the number of tokens of the input, floating-point
|
| 679 |
+
# operations will not be computed." To suppress this warning, we set the "estimate_tokens" key in the model's
|
| 680 |
+
# "warnings_issued" dictionary to True. This acts as a flag to indicate that the warning has already been
|
| 681 |
+
# issued.
|
| 682 |
+
model.warnings_issued["estimate_tokens"] = True
|
| 683 |
+
|
| 684 |
+
with PartialState().local_main_process_first():
|
| 685 |
+
# Apply the chat template if needed
|
| 686 |
+
train_dataset = train_dataset.map(
|
| 687 |
+
maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc
|
| 688 |
+
)
|
| 689 |
+
if eval_dataset is not None:
|
| 690 |
+
eval_dataset = eval_dataset.map(
|
| 691 |
+
maybe_apply_chat_template,
|
| 692 |
+
fn_kwargs={"tokenizer": processing_class},
|
| 693 |
+
num_proc=args.dataset_num_proc,
|
| 694 |
+
)
|
| 695 |
+
# Shuffle the datasets
|
| 696 |
+
train_dataset = train_dataset.shuffle(seed=args.data_seed)
|
| 697 |
+
if eval_dataset is not None:
|
| 698 |
+
eval_dataset = eval_dataset.shuffle(seed=args.data_seed)
|
| 699 |
+
# Tokenize and prepare the training datasets
|
| 700 |
+
train_dataset = train_dataset.map(
|
| 701 |
+
_tokenize,
|
| 702 |
+
batched=True,
|
| 703 |
+
fn_kwargs={"tokenizer": processing_class, "embedding_tokenizer": self.embedding_tokenizer},
|
| 704 |
+
num_proc=args.dataset_num_proc,
|
| 705 |
+
desc="Tokenizing train dataset",
|
| 706 |
+
)
|
| 707 |
+
|
| 708 |
+
# Prepare the datasets
|
| 709 |
+
fn_kwargs = {
|
| 710 |
+
"prefix": "",
|
| 711 |
+
"is_encoder_decoder": self.is_encoder_decoder,
|
| 712 |
+
"tokenizer": processing_class,
|
| 713 |
+
"max_length": self.max_length,
|
| 714 |
+
"truncation_mode": self.truncation_mode,
|
| 715 |
+
"label_pad_token_id": self.label_pad_token_id,
|
| 716 |
+
"max_prompt_length": self.max_prompt_length,
|
| 717 |
+
"max_completion_length": self.max_completion_length,
|
| 718 |
+
}
|
| 719 |
+
train_dataset = train_dataset.map(
|
| 720 |
+
_process_tokens,
|
| 721 |
+
fn_kwargs=fn_kwargs,
|
| 722 |
+
num_proc=args.dataset_num_proc,
|
| 723 |
+
desc="Processing tokenized train dataset",
|
| 724 |
+
)
|
| 725 |
+
|
| 726 |
+
if eval_dataset is not None:
|
| 727 |
+
# Tokenize
|
| 728 |
+
eval_dataset = eval_dataset.map(
|
| 729 |
+
_tokenize,
|
| 730 |
+
fn_kwargs={"tokenizer": processing_class, "embedding_tokenizer": self.embedding_tokenizer},
|
| 731 |
+
batched=True,
|
| 732 |
+
num_proc=args.dataset_num_proc,
|
| 733 |
+
desc="Tokenizing eval dataset",
|
| 734 |
+
)
|
| 735 |
+
|
| 736 |
+
# Process
|
| 737 |
+
fn_kwargs = {
|
| 738 |
+
"prefix": "",
|
| 739 |
+
"is_encoder_decoder": self.is_encoder_decoder,
|
| 740 |
+
"tokenizer": processing_class,
|
| 741 |
+
"max_length": self.max_length,
|
| 742 |
+
"truncation_mode": self.truncation_mode,
|
| 743 |
+
"label_pad_token_id": self.label_pad_token_id,
|
| 744 |
+
"max_prompt_length": self.max_prompt_length,
|
| 745 |
+
"max_completion_length": self.max_completion_length,
|
| 746 |
+
}
|
| 747 |
+
eval_dataset = eval_dataset.map(
|
| 748 |
+
_process_tokens,
|
| 749 |
+
fn_kwargs=fn_kwargs,
|
| 750 |
+
num_proc=args.dataset_num_proc,
|
| 751 |
+
desc="Processing tokenized eval dataset",
|
| 752 |
+
)
|
| 753 |
+
|
| 754 |
+
desirable = train_dataset.filter(
|
| 755 |
+
lambda x: x["label"], num_proc=args.dataset_num_proc, desc="Filtering desirable examples"
|
| 756 |
+
)
|
| 757 |
+
undesirable = train_dataset.filter(
|
| 758 |
+
lambda x: not x["label"], num_proc=args.dataset_num_proc, desc="Filtering undesirable examples"
|
| 759 |
+
)
|
| 760 |
+
|
| 761 |
+
desirable = desirable.shuffle(seed=args.data_seed)
|
| 762 |
+
undesirable = undesirable.shuffle(seed=args.data_seed)
|
| 763 |
+
|
| 764 |
+
super().__init__(
|
| 765 |
+
model=model,
|
| 766 |
+
args=args,
|
| 767 |
+
data_collator=data_collator,
|
| 768 |
+
train_dataset=train_dataset,
|
| 769 |
+
eval_dataset=eval_dataset,
|
| 770 |
+
processing_class=processing_class,
|
| 771 |
+
model_init=model_init,
|
| 772 |
+
compute_metrics=compute_metrics,
|
| 773 |
+
callbacks=callbacks,
|
| 774 |
+
optimizers=optimizers,
|
| 775 |
+
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
|
| 776 |
+
)
|
| 777 |
+
|
| 778 |
+
# Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
|
| 779 |
+
# model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
|
| 780 |
+
# self.model_accepts_loss_kwargs to False to enable scaling.
|
| 781 |
+
self.model_accepts_loss_kwargs = False
|
| 782 |
+
|
| 783 |
+
# Add tags for models that have been loaded with the correct transformers version
|
| 784 |
+
if hasattr(self.model, "add_model_tags"):
|
| 785 |
+
self.model.add_model_tags(self._tag_names)
|
| 786 |
+
|
| 787 |
+
if not hasattr(self, "accelerator"):
|
| 788 |
+
raise AttributeError(
|
| 789 |
+
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
|
| 790 |
+
)
|
| 791 |
+
|
| 792 |
+
# Deepspeed Zero-3 does not support precompute_ref_log_probs
|
| 793 |
+
if self.is_deepspeed_enabled:
|
| 794 |
+
if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs:
|
| 795 |
+
raise ValueError(
|
| 796 |
+
"You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`."
|
| 797 |
+
)
|
| 798 |
+
|
| 799 |
+
if self.ref_model is None:
|
| 800 |
+
if not (self.is_peft_model or self.precompute_ref_log_probs):
|
| 801 |
+
raise ValueError(
|
| 802 |
+
"No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`"
|
| 803 |
+
)
|
| 804 |
+
else:
|
| 805 |
+
if self.is_deepspeed_enabled:
|
| 806 |
+
self.ref_model = self._prepare_deepspeed(self.ref_model)
|
| 807 |
+
else:
|
| 808 |
+
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
|
| 809 |
+
|
| 810 |
+
self.running = RunningMoments(accelerator=self.accelerator)
|
| 811 |
+
|
| 812 |
+
if self.embedding_func is None:
|
| 813 |
+
return
|
| 814 |
+
|
| 815 |
+
chosen_embeddings = self._get_sample_prompt_embeddings(desirable, sample_size=self.args.prompt_sample_size)
|
| 816 |
+
rejected_embeddings = self._get_sample_prompt_embeddings(undesirable, sample_size=self.args.prompt_sample_size)
|
| 817 |
+
|
| 818 |
+
embeddings = torch.cat((chosen_embeddings, rejected_embeddings), dim=0)
|
| 819 |
+
labels = torch.cat(
|
| 820 |
+
(torch.ones_like(chosen_embeddings[:, 0]), torch.zeros_like(rejected_embeddings[:, 0])), dim=0
|
| 821 |
+
)
|
| 822 |
+
|
| 823 |
+
self.clf = LogisticRegression(class_weight="balanced").fit(
|
| 824 |
+
embeddings.cpu().float().numpy(), labels.cpu().numpy()
|
| 825 |
+
)
|
| 826 |
+
|
| 827 |
+
@property
def match_underlying_distribution(self):
    """Whether underlying-distribution matching (UDM) is active.

    True only when both an embedding function and an embedding tokenizer
    were supplied at construction time.
    """
    has_func = self.embedding_func is not None
    has_tokenizer = self.embedding_tokenizer is not None
    return has_func and has_tokenizer
|
| 830 |
+
|
| 831 |
+
def _get_chosen_prob(self, prompt_embeddings: torch.FloatTensor) -> torch.FloatTensor:
    """
    Calculates the probability if the given prompt embedding is from desirable dataset.
    This function calculates the probability in the process and ensemble across processes.
    """
    dtype = prompt_embeddings.dtype
    device = prompt_embeddings.device
    rank = self.accelerator.process_index

    # Pad the local batch so every process contributes a tensor of the same
    # shape before gathering; padding uses the embedding tokenizer's pad id.
    padded_prompt_embeddings = self.accelerator.pad_across_processes(
        prompt_embeddings, pad_index=self.embedding_tokenizer.pad_token_id
    )
    sample_size = padded_prompt_embeddings.shape[0]
    # Rows whose mean equals the pad id are assumed to be padding rows added
    # above; `nonzero` marks this process's real rows so they can be
    # recovered after the gather/reduce round trip below.
    nonzero = padded_prompt_embeddings.mean(dim=1) != self.embedding_tokenizer.pad_token_id
    prompt_embeddings = self.accelerator.gather(padded_prompt_embeddings)

    # cannot predict for all empty values
    if prompt_embeddings.shape[0] == 0:
        return torch.tensor([], device=device, dtype=dtype)

    # Probability of class 1 ("desirable") from the logistic-regression
    # classifier (`self.clf`) fitted on sampled prompt embeddings.
    prob = self.clf.predict_proba(prompt_embeddings.cpu().float().numpy())[:, 1]
    prob = torch.as_tensor(prob, dtype=dtype, device=device)
    # Average predictions across processes so every rank sees the ensemble.
    prob = self.accelerator.reduce(prob, reduction="mean")

    # Slice this rank's share of the gathered batch back out, then drop the
    # padding rows flagged before the gather.
    prob = prob[sample_size * rank : sample_size * (rank + 1)]
    prob = prob[nonzero]

    return prob
|
| 859 |
+
|
| 860 |
+
def _vectorize_prompt(self, input_ids: torch.LongTensor, attention_mask: torch.LongTensor) -> torch.FloatTensor:
    """Embed a batch of prompts with the frozen embedding model.

    Positions holding the policy tokenizer's pad token are rewritten to the
    embedding tokenizer's pad token before calling ``self.embedding_func``.
    """
    pad_positions = input_ids == self.processing_class.pad_token_id
    remapped_ids = torch.where(pad_positions, self.embedding_tokenizer.pad_token_id, input_ids)

    # The embedding model is frozen, so gradients are not needed.
    with torch.no_grad():
        return self.embedding_func(input_ids=remapped_ids, attention_mask=attention_mask)
|
| 878 |
+
|
| 879 |
+
def _get_prompt_embeddings(
    self, batch: dict[str, Union[list, torch.LongTensor]]
) -> tuple[torch.FloatTensor, torch.FloatTensor]:
    """Extract embeddings from frozen embedding model"""

    # UDM disabled: nothing to embed.
    if not self.match_underlying_distribution:
        return None, None

    embeddings = self._vectorize_prompt(
        input_ids=batch["embedding_input_ids"],
        attention_mask=batch["embedding_attention_mask"],
    )

    labels = batch["label"]
    # `is True` / `is False` (rather than truthiness) mirrors the dataset
    # storing plain Python booleans.
    chosen_idx = [i for i, label in enumerate(labels) if label is True]
    rejected_idx = [i for i, label in enumerate(labels) if label is False]

    return embeddings[chosen_idx, ...], embeddings[rejected_idx, ...]
|
| 899 |
+
|
| 900 |
+
def _get_sample_prompt_embeddings(self, dataset: Dataset, sample_size: int = 512) -> torch.FloatTensor:
    """
    Sample instances from dataset and get prompt embeddings.
    Used for density ratio classifier training.
    """
    n_samples = min(len(dataset), sample_size)
    # Indices are drawn with replacement (np.random.choice default).
    picked = np.random.choice(len(dataset), size=(n_samples,))
    subset = dataset.select(picked)

    # prepare dataloader
    loader = self.accelerator.prepare(
        DataLoader(
            subset,
            batch_size=self.args.per_device_train_batch_size,
            collate_fn=self.data_collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )
    )

    collected = torch.empty(0)
    with torch.no_grad():
        for padded_batch in tqdm(iterable=loader, desc="Building sample prompt embeddings"):
            batch_embeddings = self._vectorize_prompt(
                input_ids=padded_batch["embedding_input_ids"],
                attention_mask=padded_batch["embedding_attention_mask"],
            )
            # Gather across processes, then accumulate on CPU to bound GPU memory.
            batch_embeddings = self.accelerator.gather_for_metrics(batch_embeddings)
            collected = torch.cat((collected, batch_embeddings.cpu()))

    return collected
|
| 932 |
+
|
| 933 |
+
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
    """Wrap the (reference) model with a DeepSpeed engine for inference.

    Mirrors the active model's DeepSpeed config, but forces ZeRO stage 0
    when the plugin is not using stage 3 so the reference model is simply
    replicated per device instead of sharded.
    """
    # Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
    deepspeed_plugin = self.accelerator.state.deepspeed_plugin
    # Deep copy so tuning the reference config does not mutate the plugin's.
    config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)

    if model is not None:
        if hasattr(model, "config"):
            # Prefer the largest entry of `hidden_sizes` when present (e.g.
            # multi-stage vision backbones), else fall back to `hidden_size`.
            hidden_size = (
                max(model.config.hidden_sizes)
                if getattr(model.config, "hidden_sizes", None)
                else getattr(model.config, "hidden_size", None)
            )
            if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
                # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
                # This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
                # NOTE(review): these dotted-string keys land as flat top-level
                # entries of the config dict — confirm DeepSpeed resolves them
                # as nested `zero_optimization` settings.
                config_kwargs.update(
                    {
                        "zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
                        "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
                        "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
                    }
                )

    # If ZeRO-3 is used, we shard both the active and reference model.
    # Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
    if config_kwargs["zero_optimization"]["stage"] != 3:
        config_kwargs["zero_optimization"]["stage"] = 0
    model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
    # The reference model is only ever used for inference.
    model.eval()
    return model
|
| 963 |
+
|
| 964 |
+
def _save_optimizer_and_scheduler(self, output_dir):
    """Save optimizer/scheduler state plus BCO-specific running statistics.

    On top of the parent behavior, persists the `RunningMoments` object and,
    when UDM is enabled, the density-ratio classifier parameters.
    """
    super()._save_optimizer_and_scheduler(output_dir)

    # Fall back to the configured output directory when none is given.
    target_dir = self.args.output_dir if output_dir is None else output_dir

    self.running.save_to_json(os.path.join(target_dir, RUNNING_NAME))

    if self.match_underlying_distribution:
        torch.save(self.clf.get_params(), os.path.join(target_dir, CLF_NAME))
|
| 974 |
+
|
| 975 |
+
def _load_optimizer_and_scheduler(self, checkpoint):
    """Load optimizer/scheduler state plus BCO-specific running statistics.

    Restores the `RunningMoments` object and, when UDM is enabled, the
    density-ratio classifier parameters written by
    `_save_optimizer_and_scheduler`. Missing files are skipped silently so
    older checkpoints remain loadable.
    """
    super()._load_optimizer_and_scheduler(checkpoint)

    if checkpoint is None:
        return
    # when loading optimizer and scheduler from checkpoint, also load the running delta object.
    running_file = os.path.join(checkpoint, RUNNING_NAME)
    if os.path.isfile(running_file):
        self.running = RunningMoments.load_from_json(self.accelerator, running_file)

    if self.match_underlying_distribution:
        clf_file = os.path.join(checkpoint, CLF_NAME)
        # Bug fix: this previously tested `running_file` instead of `clf_file`,
        # so the classifier restore was gated on the wrong file's existence.
        if os.path.isfile(clf_file):
            self.clf.set_params(**torch.load(clf_file, weights_only=True, map_location="cpu"))
|
| 989 |
+
|
| 990 |
+
@contextmanager
def null_ref_context(self):
    """Context manager for handling null reference model (that is, peft adapter manipulation)."""
    # Only disable adapters when the policy is a PEFT model acting as its own
    # reference (no dedicated reference adapter configured).
    if self.is_peft_model and not self.ref_adapter_name:
        adapter_cm = self.accelerator.unwrap_model(self.model).disable_adapter()
    else:
        adapter_cm = nullcontext()
    with adapter_cm:
        if self.ref_adapter_name:
            self.model.set_adapter(self.ref_adapter_name)
        yield
        if self.ref_adapter_name:
            # Restore the training adapter after the reference pass.
            self.model.set_adapter(self.model_adapter_name or "default")
|
| 1003 |
+
|
| 1004 |
+
def get_train_dataloader(self) -> DataLoader:
    """
    Returns the training [`~torch.utils.data.DataLoader`].

    Subclass of transformers.src.transformers.trainer.get_train_dataloader to precompute `ref_log_probs`.
    """

    if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs:
        # prepare dataloader (no shuffling so logps align with dataset order)
        loader = self.accelerator.prepare(
            DataLoader(
                self.train_dataset,
                batch_size=self.args.per_device_train_batch_size,
                collate_fn=self.data_collator,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
                shuffle=False,
            )
        )

        all_logps = []
        for padded_batch in tqdm(iterable=loader, desc="Train dataset reference log probs"):
            batch_logps = self.compute_reference_log_probs(padded_batch)
            batch_logps = self.accelerator.gather_for_metrics(batch_logps)
            all_logps.append(batch_logps.cpu())

        self.train_dataset = self.train_dataset.add_column(
            name="reference_logps", column=torch.cat(all_logps).float().numpy()
        )

        # Only the first call pays the precompute cost; later calls reuse the column.
        self._precomputed_train_ref_log_probs = True

    return super().get_train_dataloader()
|
| 1037 |
+
|
| 1038 |
+
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
    """
    Returns the evaluation [`~torch.utils.data.DataLoader`].

    Subclass of transformers.src.transformers.trainer.get_eval_dataloader to precompute `ref_log_probs`.

    Args:
        eval_dataset (`torch.utils.data.Dataset`, *optional*):
            If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
            by the `model.forward()` method are automatically removed. It must implement `__len__`.
    """
    if eval_dataset is None and self.eval_dataset is None:
        raise ValueError("Trainer: evaluation requires an eval_dataset.")
    dataset = self.eval_dataset if eval_dataset is None else eval_dataset

    if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs:
        # prepare dataloader (no shuffling so logps align with dataset order)
        loader = self.accelerator.prepare(
            DataLoader(
                dataset,
                batch_size=self.args.per_device_eval_batch_size,
                collate_fn=self.data_collator,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
                shuffle=False,
            )
        )

        all_logps = []
        for padded_batch in tqdm(iterable=loader, desc="Eval dataset reference log probs"):
            batch_logps = self.compute_reference_log_probs(padded_batch)
            batch_logps = self.accelerator.gather_for_metrics(batch_logps)
            all_logps.append(batch_logps.cpu())

        dataset = dataset.add_column(name="reference_logps", column=torch.cat(all_logps).float().numpy())

        # Save calculated reference_chosen_logps and reference_rejected_logps to the eval_dataset for subsequent runs
        if self.eval_dataset is not None:
            self.eval_dataset = dataset
        self._precomputed_eval_ref_log_probs = True

    return super().get_eval_dataloader(eval_dataset=dataset)
|
| 1083 |
+
|
| 1084 |
+
def compute_reference_log_probs(self, padded_batch: dict) -> dict:
    """Computes log probabilities of the reference model for a single padded batch of a BCO specific dataset."""

    def _completion_logits(scoring_model):
        # Encoder-decoder models score the completion via decoder inputs and
        # labels; decoder-only models score the completion ids directly.
        if self.is_encoder_decoder:
            return scoring_model(
                padded_batch["prompt_input_ids"],
                attention_mask=padded_batch["prompt_attention_mask"],
                decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
                labels=padded_batch["completion_labels"],
            ).logits
        return scoring_model(
            padded_batch["completion_input_ids"],
            attention_mask=padded_batch["completion_attention_mask"],
        ).logits

    with torch.no_grad():
        if self.ref_model is None:
            # No explicit reference model: score with the policy model while
            # its adapters are disabled.
            with self.null_ref_context():
                completion_logits = _completion_logits(self.model)
        else:
            completion_logits = _completion_logits(self.ref_model)

        completion_logps = self.get_batch_logps(
            completion_logits,
            padded_batch["completion_labels"],
            average_log_prob=False,
            is_encoder_decoder=self.is_encoder_decoder,
            label_pad_token_id=self.label_pad_token_id,
        )

    return completion_logps
|
| 1126 |
+
|
| 1127 |
+
@staticmethod
def get_batch_logps(
    logits: torch.FloatTensor,
    labels: torch.LongTensor,
    average_log_prob: bool = False,
    label_pad_token_id: int = -100,
    is_encoder_decoder: bool = False,
) -> torch.FloatTensor:
    """Compute the log probabilities of the given labels under the given logits.

    Args:
        logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
        labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
        average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.

    Returns:
        A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
    """
    if logits.shape[:-1] != labels.shape:
        raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")

    if is_encoder_decoder:
        # Fixes end-dec RuntimeError
        target = labels.clone()
    else:
        # Decoder-only: token t is predicted by the logits at position t-1,
        # so shift labels left and drop the final logit position.
        target = labels[:, 1:].clone()
        logits = logits[:, :-1, :]

    mask = target != label_pad_token_id

    # dummy token; we'll ignore the losses on these tokens later
    target[target == label_pad_token_id] = 0

    token_logps = selective_log_softmax(logits, target)
    masked_sum = (token_logps * mask).sum(-1)

    if average_log_prob:
        return masked_sum / mask.sum(-1)
    return masked_sum
|
| 1166 |
+
|
| 1167 |
+
def forward(
    self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    """Run `model` on the batch completions and split outputs into chosen/rejected groups.

    Returns (chosen_logps, rejected_logps, chosen_logits, rejected_logits), with
    `outputs.aux_loss` appended as a fifth element when `self.aux_loss_enabled` is set.
    """
    # Encoder-decoder models need labels and decoder inputs forwarded explicitly;
    # decoder-only models take the completion ids alone.
    model_kwargs = (
        {
            "labels": batch["completion_labels"],
            "decoder_input_ids": batch.get("completion_decoder_input_ids"),
        }
        if self.is_encoder_decoder
        else {}
    )
    if self.aux_loss_enabled:
        # Request router logits so MoE models expose the auxiliary load-balancing loss.
        model_kwargs["output_router_logits"] = True

    outputs = model(
        batch["completion_input_ids"],
        attention_mask=batch["completion_attention_mask"],
        **model_kwargs,
    )
    completion_logits = outputs.logits

    # Per-sequence summed log-probabilities of the completion labels (pad positions excluded).
    completion_logps = self.get_batch_logps(
        completion_logits,
        batch["completion_labels"],
        average_log_prob=False,
        is_encoder_decoder=self.is_encoder_decoder,
        label_pad_token_id=self.label_pad_token_id,
    )

    if completion_logps.shape[0] != len(batch["label"]):
        raise ValueError(
            "There is a mismatch between the number of examples in this batch and the number of "
            "examples for which an output sequence was predicted."
        )

    # NOTE(review): identity checks (`is True` / `is False`) assume `label` holds real
    # Python booleans; other truthy values (e.g. numpy bools) would be silently dropped
    # from both groups -- confirm against the data collator.
    chosen_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is True]
    rejected_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is False]

    chosen_logps = completion_logps[chosen_idx, ...]
    rejected_logps = completion_logps[rejected_idx, ...]

    chosen_logits = completion_logits[chosen_idx, ...]
    rejected_logits = completion_logits[rejected_idx, ...]

    if self.aux_loss_enabled:
        return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, outputs.aux_loss)
    else:
        return (chosen_logps, rejected_logps, chosen_logits, rejected_logits)
|
| 1215 |
+
|
| 1216 |
+
def _get_udm_weight(self, rejected_embeddings: torch.FloatTensor) -> torch.FloatTensor:
|
| 1217 |
+
prob_desirable = self._get_chosen_prob(rejected_embeddings)
|
| 1218 |
+
min_ratio = self.args.min_density_ratio
|
| 1219 |
+
max_ratio = self.args.max_density_ratio
|
| 1220 |
+
|
| 1221 |
+
weight = (prob_desirable / (1 - prob_desirable + 1e-8)).clamp(min=min_ratio, max=max_ratio)
|
| 1222 |
+
|
| 1223 |
+
return weight
|
| 1224 |
+
|
| 1225 |
+
def bco_loss(
    self,
    policy_chosen_logps: torch.FloatTensor,
    policy_rejected_logps: torch.FloatTensor,
    reference_chosen_logps: torch.FloatTensor,
    reference_rejected_logps: torch.FloatTensor,
    chosen_embeddings: Optional[torch.FloatTensor],
    rejected_embeddings: Optional[torch.FloatTensor],
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    """Compute the BCO loss for a batch of policy and reference model log probabilities.

    Args:
        policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (num(chosen) in batch_size,)
        policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (num(rejected) in batch_size,)
        reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (num(chosen) in batch_size,)
        reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (num(rejected) in batch_size,)
        chosen_embeddings: embeddings of desirable prompts
        rejected_embeddings: embeddings of undesirable prompts

    Returns:
        A tuple of four tensors: (losses, chosen_rewards, rejected_rewards, delta).
        The losses tensor contains the BCO loss for each example in the batch.
        The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
        The delta value contains the moving average of all implicit rewards.
    """

    # Implicit reward = beta * log(policy / reference) per example.
    if policy_chosen_logps.shape[0] != 0 or reference_chosen_logps.shape[0] != 0:
        chosen_logratios = policy_chosen_logps - reference_chosen_logps
        chosen_rewards = self.beta * chosen_logratios
    else:
        # lists can't be empty -- if they are, then accelerate.gather will hang
        chosen_losses = torch.Tensor([]).to(self.accelerator.device)
        chosen_rewards = torch.Tensor([]).to(self.accelerator.device)

    if policy_rejected_logps.shape[0] != 0 or reference_rejected_logps.shape[0] != 0:
        rejected_logratios = policy_rejected_logps - reference_rejected_logps
        rejected_rewards = self.beta * rejected_logratios
    else:
        # lists can't be empty -- if they are, then accelerate.gather will hang
        rejected_losses = torch.Tensor([]).to(self.accelerator.device)
        rejected_rewards = torch.Tensor([]).to(self.accelerator.device)

    # delta is the running mean of all implicit rewards seen so far; it acts as the
    # dynamic decision boundary of the binary classifier. It must be updated with
    # this batch's rewards BEFORE the losses below are computed.
    rewards = torch.cat((chosen_rewards, rejected_rewards), 0).mean().detach()
    self.running.update(rewards)
    delta = self.running.mean

    # Binary-classifier objective: push chosen rewards above delta, rejected below it.
    if policy_chosen_logps.shape[0] != 0 or reference_chosen_logps.shape[0] != 0:
        chosen_losses = -F.logsigmoid(chosen_rewards - delta)

    if policy_rejected_logps.shape[0] != 0 or reference_rejected_logps.shape[0] != 0:
        rejected_losses = -F.logsigmoid(-(rejected_rewards - delta))

    if self.match_underlying_distribution:
        # Underlying-distribution matching: re-weight undesirable examples by a
        # clamped density ratio estimated from prompt embeddings.
        chosen_weight = torch.ones_like(chosen_losses)
        rejected_weight = self._get_udm_weight(rejected_embeddings)

        losses = torch.cat((chosen_weight * chosen_losses, rejected_weight * rejected_losses), dim=0)
    else:
        losses = torch.cat((chosen_losses, rejected_losses), dim=0)

    return losses, chosen_rewards, rejected_rewards, torch.as_tensor(delta)
|
| 1286 |
+
|
| 1287 |
+
def get_batch_loss_metrics(
    self,
    model,
    batch: dict[str, Union[list, torch.LongTensor]],
):
    """Compute the BCO loss and other metrics for the given batch of inputs for train or test."""
    metrics = {}
    # Move all tensor entries to the accelerator device; non-tensor entries pass through.
    batch = {k: (v.to(self.accelerator.device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}

    forward_output = self.forward(model, batch)
    (
        policy_chosen_logps,
        policy_rejected_logps,
        policy_chosen_logits,
        policy_rejected_logits,
    ) = forward_output[:4]
    if self.aux_loss_enabled:
        # Fifth element only exists when the forward pass requested router logits.
        aux_loss = forward_output[4]

    # if reference_logps in batch use them, otherwise use the reference model
    if "reference_logps" in batch:
        # Precomputed reference log-probs: split them by the boolean `label` column.
        chosen_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is True]
        rejected_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is False]

        reference_chosen_logps = batch["reference_logps"][chosen_idx, ...]
        reference_rejected_logps = batch["reference_logps"][rejected_idx, ...]
    else:
        with torch.no_grad():
            if self.ref_model is None:
                # No separate reference model: temporarily disable the policy's
                # adapters so the base weights act as the reference.
                with self.null_ref_context():
                    (
                        reference_chosen_logps,
                        reference_rejected_logps,
                        _,
                        _,
                    ) = self.forward(self.model, batch)[:4]
            else:
                (
                    reference_chosen_logps,
                    reference_rejected_logps,
                    _,
                    _,
                ) = self.forward(self.ref_model, batch)[:4]

    chosen_embeddings, rejected_embeddings = self._get_prompt_embeddings(batch)

    losses, chosen_rewards, rejected_rewards, delta = self.bco_loss(
        policy_chosen_logps,
        policy_rejected_logps,
        reference_chosen_logps,
        reference_rejected_logps,
        chosen_embeddings,
        rejected_embeddings,
    )
    metrics["delta"] = self.accelerator.gather_for_metrics(delta).mean().item()

    # Per-process example counts, gathered so sums below can be averaged globally.
    num_chosen = torch.Tensor([len(chosen_rewards)]).to(self.accelerator.device)
    num_rejected = torch.Tensor([len(rejected_rewards)]).to(self.accelerator.device)

    all_num_chosen = self.accelerator.gather_for_metrics(num_chosen).sum().item()
    all_num_rejected = self.accelerator.gather_for_metrics(num_rejected).sum().item()

    # Store sums plus counts; `log` later divides the "*_sum" entries by "count/*".
    if all_num_chosen > 0:
        metrics["rewards/chosen_sum"] = (
            self.accelerator.gather_for_metrics(chosen_rewards.nansum()).nansum().item()
        )
        metrics["logps/chosen_sum"] = (
            self.accelerator.gather_for_metrics(policy_chosen_logps.nansum()).nansum().item()
        )
        metrics["logits/chosen_sum"] = (
            self.accelerator.gather_for_metrics(policy_chosen_logits.nansum()).nansum().item()
        )
        metrics["count/chosen"] = all_num_chosen

    if all_num_rejected > 0:
        metrics["rewards/rejected_sum"] = (
            self.accelerator.gather_for_metrics(rejected_rewards.nansum()).nansum().item()
        )
        metrics["logps/rejected_sum"] = (
            self.accelerator.gather_for_metrics(policy_rejected_logps.nansum()).nansum().item()
        )
        metrics["logits/rejected_sum"] = (
            self.accelerator.gather_for_metrics(policy_rejected_logits.nansum()).nansum().item()
        )
        metrics["count/rejected"] = all_num_rejected

    loss = losses.nanmean()
    if self.aux_loss_enabled:
        loss += self.aux_loss_coef * aux_loss

    return loss, metrics
|
| 1378 |
+
|
| 1379 |
+
def compute_loss(
    self,
    model: Union[PreTrainedModel, nn.Module],
    inputs: dict[str, Union[torch.Tensor, Any]],
    return_outputs=False,
    num_items_in_batch=None,
) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
    """Compute the BCO training loss; optionally also return the per-batch metrics."""
    # Autocast is only required when a PEFT model was silently cast to bf16.
    if self._peft_has_been_casted_to_bf16:
        ctx = amp.autocast("cuda")
    else:
        ctx = nullcontext()

    with ctx:
        loss, metrics = self.get_batch_loss_metrics(model, inputs)

    # Make sure to move the loss to the device the original accumulating loss is at back in the `Trainer` class:
    loss = loss.to(self.args.device)
    # Store metrics on the main process only; `log` aggregates them later.
    if self.accelerator.is_main_process:
        self.store_metrics(metrics, train_eval="train")

    return (loss, metrics) if return_outputs else loss
|
| 1400 |
+
|
| 1401 |
+
def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
    """Append each metric value to the per-split accumulator consumed later by `log`."""
    bucket = self._stored_metrics[train_eval]
    for name in metrics:
        bucket[name].append(metrics[name])
|
| 1404 |
+
|
| 1405 |
+
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
|
| 1406 |
+
if self.train_dataset is None or not has_length(self.train_dataset):
|
| 1407 |
+
return None
|
| 1408 |
+
return SequentialSampler(self.train_dataset)
|
| 1409 |
+
|
| 1410 |
+
def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[str, str]:
    """Generate samples from the model and reference model for the given batch of inputs."""

    # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
    # the torch cuda amp context manager as some hidden states are silently casted to full precision.
    generate_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
    with generate_context_manager:
        policy_output = model.generate(
            input_ids=batch["prompt_input_ids"],
            attention_mask=batch["prompt_attention_mask"],
            max_length=self.max_length,
            do_sample=True,
            pad_token_id=self.processing_class.pad_token_id,
        )

        # if reference_output in batch use that otherwise use the reference model
        if "reference_output" in batch:
            reference_output = batch["reference_output"]
        else:
            if self.ref_model is None:
                # Shared-weights case: disable adapters so the base model acts as the reference.
                with self.null_ref_context():
                    reference_output = self.model.generate(
                        input_ids=batch["prompt_input_ids"],
                        attention_mask=batch["prompt_attention_mask"],
                        max_length=self.max_length,
                        do_sample=True,
                        pad_token_id=self.processing_class.pad_token_id,
                    )
            else:
                reference_output = self.ref_model.generate(
                    input_ids=batch["prompt_input_ids"],
                    attention_mask=batch["prompt_attention_mask"],
                    max_length=self.max_length,
                    do_sample=True,
                    pad_token_id=self.processing_class.pad_token_id,
                )

    # Pad both outputs to a common length so batch_decode sees uniform sequences.
    policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
    policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)

    reference_output = pad_to_length(reference_output, self.max_length, self.processing_class.pad_token_id)
    reference_output_decoded = self.processing_class.batch_decode(reference_output, skip_special_tokens=True)

    return policy_output_decoded, reference_output_decoded
|
| 1454 |
+
|
| 1455 |
+
def prediction_step(
    self,
    model: Union[PreTrainedModel, nn.Module],
    inputs: dict[str, Union[torch.Tensor, Any]],
    prediction_loss_only: bool,
    ignore_keys: Optional[list[str]] = None,
):
    """Run one evaluation step: compute loss/metrics and, unless `prediction_loss_only`,
    return mean chosen/rejected logits plus dummy labels for the eval loop."""
    if ignore_keys is None:
        # Fall back to the model config's inference-ignore list when available.
        if hasattr(model, "config"):
            ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
        else:
            ignore_keys = []

    prediction_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
    with torch.no_grad(), prediction_context_manager:
        loss, metrics = self.get_batch_loss_metrics(model, inputs)

    # force log the metrics
    if self.accelerator.is_main_process:
        self.store_metrics(metrics, train_eval="eval")

    if prediction_loss_only:
        return (loss.detach(), None, None)

    # logits for the chosen and rejected samples from model
    # NOTE(review): `get_batch_loss_metrics` stores "logits/chosen_sum" /
    # "logits/rejected_sum", not "logits/chosen" -- confirm these keys are ever
    # present; as written this looks like it raises KeyError when
    # prediction_loss_only is False.
    logits_dict = {
        "eval_logits/chosen": metrics["logits/chosen"],
        "eval_logits/rejected": metrics["logits/rejected"],
    }
    logits = tuple(v.unsqueeze(dim=0) for k, v in logits_dict.items() if k not in ignore_keys)
    logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device)
    # Dummy labels: the eval loop requires a label tensor even though BCO has none here.
    labels = torch.zeros(logits.shape[0], device=self.accelerator.device)

    return (loss.detach(), logits, labels)
|
| 1489 |
+
|
| 1490 |
+
def evaluation_loop(
    self,
    dataloader: DataLoader,
    description: str,
    prediction_loss_only: Optional[bool] = None,
    ignore_keys: Optional[list[str]] = None,
    metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
    """
    Overriding built-in evaluation loop to store metrics for each batch.
    Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

    Works both with or without labels.
    """

    # Sample and save to game log if requested (for one batch to save time)
    if self.generate_during_eval:
        # Generate random indices within the range of the total number of samples
        num_samples = len(dataloader.dataset)
        random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)

        # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
        random_batch_dataset = dataloader.dataset.select(random_indices)
        random_batch = self.data_collator(random_batch_dataset)
        random_batch = self._prepare_inputs(random_batch)

        # Only generate for undesirable examples (label is False) of the sampled batch.
        target_indicies = [i for i in range(len(random_batch["label"])) if random_batch["label"][i] is False]
        target_batch = {
            "prompt_input_ids": random_batch["prompt_input_ids"][target_indicies],
            "prompt_attention_mask": random_batch["prompt_attention_mask"][target_indicies],
            "prompt": itemgetter(*target_indicies)(random_batch["prompt"]),
        }
        policy_output_decoded, ref_output_decoded = self.generate_from_model_and_ref(self.model, target_batch)

        # Strip the prompt prefix so the logged table shows only the completions.
        table = pd.DataFrame(
            columns=["Prompt", "Policy", "Ref Model"],
            data=[
                [prompt, pol[len(prompt) :], ref[len(prompt) :]]
                for prompt, pol, ref in zip(target_batch["prompt"], policy_output_decoded, ref_output_decoded)
            ],
        )
        if "wandb" in self.args.report_to:
            wandb.log({"game_log": wandb.Table(data=table)})

        if "comet_ml" in self.args.report_to:
            log_table_to_comet_experiment(
                name="game_log.csv",
                table=table,
            )

    # Base evaluation
    initial_output = super().evaluation_loop(
        dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
    )

    return initial_output
|
| 1546 |
+
|
| 1547 |
+
def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
    """
    Log `logs` on the various objects watching training, including stored metrics.

    Args:
        logs (`dict[str, float]`):
            The values to log.
        start_time (`float` or `None`, *optional*, defaults to `None`):
            Start time of the training.
    """
    # logs either has 'loss' or 'eval_loss'
    train_eval = "train" if "loss" in logs else "eval"
    # train metrics should have no prefix, eval should have 'eval_'
    prefix = "eval_" if train_eval == "eval" else ""
    # accumulate average metrics from sums and lengths
    # (`get_batch_loss_metrics` stored "<metric>/<split>_sum" and "count/<split>";
    # the true mean is total sum divided by total count.)
    for split in ["chosen", "rejected"]:
        if f"count/{split}" in self._stored_metrics[train_eval]:
            count_sum = torch.Tensor(self._stored_metrics[train_eval][f"count/{split}"]).sum().item()
            for metric in ["rewards", "logps", "logits"]:
                logs[f"{prefix}{metric}/{split}"] = (
                    torch.Tensor(self._stored_metrics[train_eval][f"{metric}/{split}_sum"]).sum().item()
                    / count_sum
                )
                # delete obsolete metric
                del self._stored_metrics[train_eval][f"{metric}/{split}_sum"]
            del self._stored_metrics[train_eval][f"count/{split}"]
    # calculate reward margin
    if f"{prefix}rewards/chosen" in logs and f"{prefix}rewards/rejected" in logs:
        logs[f"{prefix}rewards/margins"] = logs[f"{prefix}rewards/chosen"] - logs[f"{prefix}rewards/rejected"]
    # Add averaged stored metrics to logs
    for key, metrics in self._stored_metrics[train_eval].items():
        logs[f"{prefix}{key}"] = torch.Tensor(metrics).mean().item()
    # Reset the accumulator for this split once everything is flushed into `logs`.
    del self._stored_metrics[train_eval]

    # `start_time` was added to Trainer.log in transformers 4.47.
    if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
        return super().log(logs, start_time)
    else:  # transformers<=4.46
        return super().log(logs)
|
| 1585 |
+
|
| 1586 |
+
def create_model_card(
    self,
    model_name: Optional[str] = None,
    dataset_name: Optional[str] = None,
    tags: Union[str, list[str], None] = None,
):
    """
    Creates a draft of a model card using the information available to the `Trainer`.

    Args:
        model_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the model.
        dataset_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the dataset used for training.
        tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
            Tags to be associated with the model card.
    """
    # Only the main process writes the card.
    if not self.is_world_process_zero():
        return

    # A hub model id (anything that is not an existing local directory) counts as
    # the base model reference for the card.
    if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
        base_model = self.model.config._name_or_path
    else:
        base_model = None

    tags = tags or []
    if isinstance(tags, str):
        tags = [tags]

    if hasattr(self.model.config, "unsloth_version"):
        tags.append("unsloth")

    citation = textwrap.dedent("""\
    @article{jung2024binary,
        title = {{Binary Classifier Optimization for Large Language Model Alignment}},
        author = {Seungjae Jung and Gunsoo Han and Daniel Wontae Nam and Kyoung{-}Woon On},
        year = 2024,
        eprint = {arXiv:2404.04656}
    }""")

    model_card = generate_model_card(
        base_model=base_model,
        model_name=model_name,
        hub_model_id=self.hub_model_id,
        dataset_name=dataset_name,
        tags=tags,
        wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
        comet_url=get_comet_experiment_url(),
        trainer_name="BCO",
        trainer_citation=citation,
        paper_title="Binary Classifier Optimization for Large Language Model Alignment",
        paper_id="2404.04656",
    )

    model_card.save(os.path.join(self.args.output_dir, "README.md"))
|
| 1641 |
+
class UnslothBCOTrainer(_UnslothBCOTrainer):
|
| 1642 |
+
"""
|
| 1643 |
+
|
| 1644 |
+
Initialize BCOTrainer from [BCO](https://huggingface.co/papers/2404.04656) paper.
|
| 1645 |
+
|
| 1646 |
+
Args:
|
| 1647 |
+
model (`transformers.PreTrainedModel`):
|
| 1648 |
+
The model to train, preferably an `AutoModelForSequenceClassification`.
|
| 1649 |
+
ref_model (`PreTrainedModelWrapper`):
|
| 1650 |
+
Hugging Face transformer model with a casual language modelling head. Used for implicit reward computation and loss. If no
|
| 1651 |
+
reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
|
| 1652 |
+
args (`BCOConfig`):
|
| 1653 |
+
The arguments to use for training.
|
| 1654 |
+
train_dataset (`datasets.Dataset`):
|
| 1655 |
+
The dataset to use for training.
|
| 1656 |
+
eval_dataset (`datasets.Dataset`):
|
| 1657 |
+
The dataset to use for evaluation.
|
| 1658 |
+
processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
|
| 1659 |
+
Processing class used to process the data. If provided, will be used to automatically process the inputs
|
| 1660 |
+
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
|
| 1661 |
+
reuse the fine-tuned model.
|
| 1662 |
+
data_collator (`transformers.DataCollator`, *optional*, defaults to `None`):
|
| 1663 |
+
The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
|
| 1664 |
+
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
|
| 1665 |
+
model_init (`Callable[[], transformers.PreTrainedModel]`):
|
| 1666 |
+
The model initializer to use for training. If None is specified, the default model initializer will be used.
|
| 1667 |
+
callbacks (`list[transformers.TrainerCallback]`):
|
| 1668 |
+
The callbacks to use for training.
|
| 1669 |
+
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
|
| 1670 |
+
The optimizer and scheduler to use for training.
|
| 1671 |
+
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
|
| 1672 |
+
The function to use to preprocess the logits before computing the metrics.
|
| 1673 |
+
peft_config (`dict`, defaults to `None`):
|
| 1674 |
+
The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
|
| 1675 |
+
compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
|
| 1676 |
+
The function to use to compute the metrics. Must take a `EvalPrediction` and return
|
| 1677 |
+
a dictionary string to metric values.
|
| 1678 |
+
model_adapter_name (`str`, defaults to `None`):
|
| 1679 |
+
Name of the train target PEFT adapter, when using LoRA with multiple adapters.
|
| 1680 |
+
ref_adapter_name (`str`, defaults to `None`):
|
| 1681 |
+
Name of the reference PEFT adapter, when using LoRA with multiple adapters.
|
| 1682 |
+
|
| 1683 |
+
"""
|
| 1684 |
+
def __init__(
|
| 1685 |
+
self,
|
| 1686 |
+
model = None,
|
| 1687 |
+
ref_model = None,
|
| 1688 |
+
args = None,
|
| 1689 |
+
train_dataset = None,
|
| 1690 |
+
eval_dataset = None,
|
| 1691 |
+
processing_class = None,
|
| 1692 |
+
data_collator = None,
|
| 1693 |
+
model_init = None,
|
| 1694 |
+
callbacks = None,
|
| 1695 |
+
preprocess_logits_for_metrics = None,
|
| 1696 |
+
peft_config = None,
|
| 1697 |
+
compute_metrics = None,
|
| 1698 |
+
model_adapter_name = None,
|
| 1699 |
+
ref_adapter_name = None,
|
| 1700 |
+
embedding_func = None,
|
| 1701 |
+
embedding_tokenizer = None,
|
| 1702 |
+
**kwargs
|
| 1703 |
+
):
|
| 1704 |
+
if args is None: args = UnslothBCOConfig()
|
| 1705 |
+
use_bf16 = getattr(args, 'bf16', False)
|
| 1706 |
+
use_fp16 = getattr(args, 'fp16', False)
|
| 1707 |
+
force_float32 = False
|
| 1708 |
+
if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
|
| 1709 |
+
print('Unsloth: Switching to float32 training since model cannot work with float16')
|
| 1710 |
+
force_float32 = True
|
| 1711 |
+
mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
|
| 1712 |
+
dtype = getattr(model.config, 'torch_dtype', None)
|
| 1713 |
+
if dtype is None: dtype = model.get_input_embeddings().dtype
|
| 1714 |
+
from unsloth_zoo.utils import _get_dtype
|
| 1715 |
+
dtype = _get_dtype(dtype)
|
| 1716 |
+
float16 = dtype == torch.float16
|
| 1717 |
+
if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
|
| 1718 |
+
if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
|
| 1719 |
+
if force_float32:
|
| 1720 |
+
args.fp16 = False
|
| 1721 |
+
args.bf16 = False
|
| 1722 |
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
|
| 1723 |
+
elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
|
| 1724 |
+
args.fp16 = float16
|
| 1725 |
+
args.bf16 = not float16
|
| 1726 |
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
|
| 1727 |
+
if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
|
| 1728 |
+
args.eval_strategy = 'steps'
|
| 1729 |
+
if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
|
| 1730 |
+
ga_steps = getattr(args, 'gradient_accumulation_steps', None)
|
| 1731 |
+
if ga_steps is not None and ga_steps > 1:
|
| 1732 |
+
from transformers import __version__ as transformers_version
|
| 1733 |
+
if Version(transformers_version) <= Version('4.45.2'):
|
| 1734 |
+
print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
|
| 1735 |
+
'`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
|
| 1736 |
+
if getattr(args, 'eval_strategy', 'no') != 'no':
|
| 1737 |
+
eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
|
| 1738 |
+
if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
|
| 1739 |
+
if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
|
| 1740 |
+
fp16_full_eval = getattr(args, 'fp16_full_eval', False)
|
| 1741 |
+
bf16_full_eval = getattr(args, 'bf16_full_eval', False)
|
| 1742 |
+
if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
|
| 1743 |
+
if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
|
| 1744 |
+
if force_float32:
|
| 1745 |
+
args.bf16_full_eval = False
|
| 1746 |
+
args.fp16_full_eval = False
|
| 1747 |
+
elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
|
| 1748 |
+
args.bf16_full_eval = True
|
| 1749 |
+
args.fp16_full_eval = False
|
| 1750 |
+
elif not bf16_full_eval and not fp16_full_eval:
|
| 1751 |
+
args.bf16_full_eval = args.bf16
|
| 1752 |
+
args.fp16_full_eval = args.fp16
|
| 1753 |
+
_output_logits = False
|
| 1754 |
+
if locals().get('compute_metrics', None) is not None: _output_logits = True
|
| 1755 |
+
if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
|
| 1756 |
+
if _output_logits:
|
| 1757 |
+
os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
|
| 1758 |
+
if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
|
| 1759 |
+
pass
|
| 1760 |
+
else:
|
| 1761 |
+
model_max_seq_length = getattr(model, 'max_seq_length', None)
|
| 1762 |
+
args_max_seq_length = getattr(args, 'max_seq_length', None)
|
| 1763 |
+
if args_max_seq_length is None and model_max_seq_length is not None:
|
| 1764 |
+
max_seq_length = model.max_seq_length
|
| 1765 |
+
if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
|
| 1766 |
+
if model is not None and hasattr(model, 'for_training'):
|
| 1767 |
+
model.for_training()
|
| 1768 |
+
if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
|
| 1769 |
+
if 'processing_class' in locals():
|
| 1770 |
+
if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
|
| 1771 |
+
if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
|
| 1772 |
+
__tokenizer = processing_class if 'processing_class' in locals() else tokenizer
|
| 1773 |
+
from unsloth_zoo.vision_utils import UnslothVisionDataCollator
|
| 1774 |
+
if not isinstance(data_collator, UnslothVisionDataCollator):
|
| 1775 |
+
if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
|
| 1776 |
+
data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
|
| 1777 |
+
elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
|
| 1778 |
+
data_collator = DataCollatorForSeq2Seq(__tokenizer)
|
| 1779 |
+
else:
|
| 1780 |
+
if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
|
| 1781 |
+
if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
|
| 1782 |
+
if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
|
| 1783 |
+
if not isinstance(data_collator, UnslothVisionDataCollator):
|
| 1784 |
+
if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
|
| 1785 |
+
if isinstance(data_collator, DataCollatorForSeq2Seq):
|
| 1786 |
+
data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
|
| 1787 |
+
else:
|
| 1788 |
+
data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
|
| 1789 |
+
other_metrics = []
|
| 1790 |
+
|
| 1791 |
+
from unsloth_zoo.logging_utils import PatchRLStatistics
|
| 1792 |
+
PatchRLStatistics('bco_trainer', other_metrics)
|
| 1793 |
+
|
| 1794 |
+
super().__init__(
|
| 1795 |
+
model = model,
|
| 1796 |
+
ref_model = ref_model,
|
| 1797 |
+
args = args,
|
| 1798 |
+
train_dataset = train_dataset,
|
| 1799 |
+
eval_dataset = eval_dataset,
|
| 1800 |
+
processing_class = processing_class,
|
| 1801 |
+
data_collator = data_collator,
|
| 1802 |
+
model_init = model_init,
|
| 1803 |
+
callbacks = callbacks,
|
| 1804 |
+
preprocess_logits_for_metrics = preprocess_logits_for_metrics,
|
| 1805 |
+
peft_config = peft_config,
|
| 1806 |
+
compute_metrics = compute_metrics,
|
| 1807 |
+
model_adapter_name = model_adapter_name,
|
| 1808 |
+
ref_adapter_name = ref_adapter_name,
|
| 1809 |
+
embedding_func = embedding_func,
|
| 1810 |
+
embedding_tokenizer = embedding_tokenizer,**kwargs)
|
| 1811 |
+
if hasattr(self, 'neftune_hook_handle'):
|
| 1812 |
+
self.neftune_hook_handle.remove()
|
| 1813 |
+
if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
|
| 1814 |
+
if getattr(args, 'neftune_noise_alpha', None) is not None:
|
| 1815 |
+
model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
|
| 1816 |
+
pass
|
| 1817 |
+
|
| 1818 |
+
pass
|
unsloth_compiled_cache/UnslothCPOTrainer.py
ADDED
|
@@ -0,0 +1,1551 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.cpo_trainer import (Any, AutoModelForCausalLM, BaseImageProcessor, CPOConfig, CPOTrainer, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, Literal, Optional, PartialState, PeftModel, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, Trainer, TrainerCallback, Union, add_bos_token_if_needed, add_eos_token_if_needed, amp, defaultdict, disable_dropout_in_model, generate_model_card, get_comet_experiment_url, inspect, is_comet_available, is_peft_available, is_torch_fx_proxy, is_wandb_available, log_table_to_comet_experiment, maybe_apply_chat_template, maybe_extract_prompt, nn, np, nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_model_for_kbit_training, random, textwrap, torch, transformers, version, warnings)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
| 25 |
+
torch_compile_options = {
|
| 26 |
+
"epilogue_fusion" : True,
|
| 27 |
+
"max_autotune" : False,
|
| 28 |
+
"shape_padding" : True,
|
| 29 |
+
"trace.enabled" : False,
|
| 30 |
+
"triton.cudagraphs" : False,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
|
| 34 |
+
def selective_log_softmax(logits, index):
|
| 35 |
+
logits = logits.to(torch.float32)
|
| 36 |
+
selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
|
| 37 |
+
# loop to reduce peak mem consumption
|
| 38 |
+
# logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
|
| 39 |
+
logsumexp_values = torch.logsumexp(logits, dim = -1)
|
| 40 |
+
per_token_logps = selected_logits - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
|
| 41 |
+
return per_token_logps
|
| 42 |
+
@dataclass
|
| 43 |
+
class UnslothCPOConfig(CPOConfig):
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
Configuration class for the [`CPOTrainer`].
|
| 47 |
+
|
| 48 |
+
Using [`~transformers.HfArgumentParser`] we can turn this class into
|
| 49 |
+
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
|
| 50 |
+
command line.
|
| 51 |
+
|
| 52 |
+
Parameters:
|
| 53 |
+
learning_rate (`float`, *optional*, defaults to `1e-6`):
|
| 54 |
+
Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
|
| 55 |
+
[`~transformers.TrainingArguments`].
|
| 56 |
+
max_length (`int` or `None`, *optional*, defaults to `1024`):
|
| 57 |
+
Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
|
| 58 |
+
to use the default data collator.
|
| 59 |
+
max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
|
| 60 |
+
Maximum length of the prompt. This argument is required if you want to use the default data collator.
|
| 61 |
+
max_completion_length (`int` or `None`, *optional*, defaults to `None`):
|
| 62 |
+
Maximum length of the completion. This argument is required if you want to use the default data collator
|
| 63 |
+
and your model is an encoder-decoder.
|
| 64 |
+
beta (`float`, *optional*, defaults to `0.1`):
|
| 65 |
+
Parameter controlling the deviation from the reference model. Higher β means less deviation from the
|
| 66 |
+
reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in
|
| 67 |
+
the [paper](https://huggingface.co/papers/2310.12036).
|
| 68 |
+
label_smoothing (`float`, *optional*, defaults to `0.0`):
|
| 69 |
+
Label smoothing factor. This argument is required if you want to use the default data collator.
|
| 70 |
+
loss_type (`str`, *optional*, defaults to `"sigmoid"`):
|
| 71 |
+
Type of loss to use. Possible values are:
|
| 72 |
+
|
| 73 |
+
- `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper.
|
| 74 |
+
- `"hinge"`: hinge loss on the normalized likelihood from the [SLiC](https://huggingface.co/papers/2305.10425) paper.
|
| 75 |
+
- `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper.
|
| 76 |
+
- `"simpo"`: SimPO loss from the [SimPO](https://huggingface.co/papers/2405.14734) paper.
|
| 77 |
+
|
| 78 |
+
disable_dropout (`bool`, *optional*, defaults to `True`):
|
| 79 |
+
Whether to disable dropout in the model.
|
| 80 |
+
cpo_alpha (`float`, *optional*, defaults to `1.0`):
|
| 81 |
+
Weight of the BC regularizer in CPO training.
|
| 82 |
+
simpo_gamma (`float`, *optional*, defaults to `0.5`):
|
| 83 |
+
Target reward margin for the SimPO loss, used only when the `loss_type="simpo"`.
|
| 84 |
+
label_pad_token_id (`int`, *optional*, defaults to `-100`):
|
| 85 |
+
Label pad token id. This argument is required if you want to use the default data collator.
|
| 86 |
+
padding_value (`int` or `None`, *optional*, defaults to `None`):
|
| 87 |
+
Padding value to use. If `None`, the padding value of the tokenizer is used.
|
| 88 |
+
truncation_mode (`str`,*optional*, defaults to `"keep_end"`):
|
| 89 |
+
Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
|
| 90 |
+
This argument is required if you want to use the default data collator.
|
| 91 |
+
generate_during_eval (`bool`, *optional*, defaults to `False`):
|
| 92 |
+
If `True`, generates and logs completions from the model to W&B or Comet during evaluation.
|
| 93 |
+
is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
|
| 94 |
+
When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
|
| 95 |
+
you need to specify if the model returned by the callable is an encoder-decoder model.
|
| 96 |
+
model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
|
| 97 |
+
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
|
| 98 |
+
string.
|
| 99 |
+
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
|
| 100 |
+
Number of processes to use for processing the dataset.
|
| 101 |
+
|
| 102 |
+
"""
|
| 103 |
+
vllm_sampling_params: Optional[Any] = field(
|
| 104 |
+
default = None,
|
| 105 |
+
metadata = {'help': 'vLLM SamplingParams'},
|
| 106 |
+
)
|
| 107 |
+
unsloth_num_chunks : Optional[int] = field(
|
| 108 |
+
default = -1,
|
| 109 |
+
metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
|
| 110 |
+
)
|
| 111 |
+
def __init__(
|
| 112 |
+
self,
|
| 113 |
+
output_dir = None,
|
| 114 |
+
overwrite_output_dir = None,
|
| 115 |
+
do_train = False,
|
| 116 |
+
do_eval = False,
|
| 117 |
+
do_predict = False,
|
| 118 |
+
eval_strategy = 'no',
|
| 119 |
+
prediction_loss_only = False,
|
| 120 |
+
per_device_train_batch_size = 4,
|
| 121 |
+
per_device_eval_batch_size = 4,
|
| 122 |
+
per_gpu_train_batch_size = None,
|
| 123 |
+
per_gpu_eval_batch_size = None,
|
| 124 |
+
gradient_accumulation_steps = 2,
|
| 125 |
+
eval_accumulation_steps = 2,
|
| 126 |
+
eval_delay = 0,
|
| 127 |
+
torch_empty_cache_steps = 250,
|
| 128 |
+
learning_rate = 5e-05,
|
| 129 |
+
weight_decay = 0.01,
|
| 130 |
+
adam_beta1 = 0.9,
|
| 131 |
+
adam_beta2 = 0.999,
|
| 132 |
+
adam_epsilon = 1e-08,
|
| 133 |
+
max_grad_norm = 1.0,
|
| 134 |
+
num_train_epochs = 3.0,
|
| 135 |
+
max_steps = -1,
|
| 136 |
+
lr_scheduler_type = 'linear',
|
| 137 |
+
warmup_ratio = 0.1,
|
| 138 |
+
warmup_steps = 0,
|
| 139 |
+
log_level = 'passive',
|
| 140 |
+
log_level_replica = 'warning',
|
| 141 |
+
log_on_each_node = True,
|
| 142 |
+
logging_dir = None,
|
| 143 |
+
logging_strategy = 'steps',
|
| 144 |
+
logging_first_step = False,
|
| 145 |
+
logging_steps = 1,
|
| 146 |
+
logging_nan_inf_filter = False,
|
| 147 |
+
save_strategy = 'steps',
|
| 148 |
+
save_steps = 500,
|
| 149 |
+
save_total_limit = None,
|
| 150 |
+
save_safetensors = True,
|
| 151 |
+
save_on_each_node = False,
|
| 152 |
+
save_only_model = False,
|
| 153 |
+
restore_callback_states_from_checkpoint = False,
|
| 154 |
+
no_cuda = False,
|
| 155 |
+
use_cpu = False,
|
| 156 |
+
use_mps_device = False,
|
| 157 |
+
seed = 3407,
|
| 158 |
+
data_seed = 3407,
|
| 159 |
+
jit_mode_eval = False,
|
| 160 |
+
use_ipex = False,
|
| 161 |
+
bf16 = False,
|
| 162 |
+
fp16 = False,
|
| 163 |
+
fp16_opt_level = 'O1',
|
| 164 |
+
half_precision_backend = 'auto',
|
| 165 |
+
bf16_full_eval = False,
|
| 166 |
+
fp16_full_eval = False,
|
| 167 |
+
tf32 = None,
|
| 168 |
+
local_rank = -1,
|
| 169 |
+
ddp_backend = None,
|
| 170 |
+
tpu_num_cores = None,
|
| 171 |
+
tpu_metrics_debug = False,
|
| 172 |
+
debug = '',
|
| 173 |
+
dataloader_drop_last = False,
|
| 174 |
+
eval_steps = None,
|
| 175 |
+
dataloader_num_workers = 0,
|
| 176 |
+
dataloader_prefetch_factor = None,
|
| 177 |
+
past_index = -1,
|
| 178 |
+
run_name = None,
|
| 179 |
+
disable_tqdm = None,
|
| 180 |
+
remove_unused_columns = True,
|
| 181 |
+
label_names = None,
|
| 182 |
+
load_best_model_at_end = False,
|
| 183 |
+
metric_for_best_model = None,
|
| 184 |
+
greater_is_better = None,
|
| 185 |
+
ignore_data_skip = False,
|
| 186 |
+
fsdp = '',
|
| 187 |
+
fsdp_min_num_params = 0,
|
| 188 |
+
fsdp_config = None,
|
| 189 |
+
tp_size = 0,
|
| 190 |
+
fsdp_transformer_layer_cls_to_wrap = None,
|
| 191 |
+
accelerator_config = None,
|
| 192 |
+
deepspeed = None,
|
| 193 |
+
label_smoothing_factor = 0.0,
|
| 194 |
+
optim = 'adamw_8bit',
|
| 195 |
+
optim_args = None,
|
| 196 |
+
adafactor = False,
|
| 197 |
+
group_by_length = False,
|
| 198 |
+
length_column_name = 'length',
|
| 199 |
+
report_to = None,
|
| 200 |
+
ddp_find_unused_parameters = None,
|
| 201 |
+
ddp_bucket_cap_mb = None,
|
| 202 |
+
ddp_broadcast_buffers = None,
|
| 203 |
+
dataloader_pin_memory = True,
|
| 204 |
+
dataloader_persistent_workers = False,
|
| 205 |
+
skip_memory_metrics = True,
|
| 206 |
+
use_legacy_prediction_loop = False,
|
| 207 |
+
push_to_hub = False,
|
| 208 |
+
resume_from_checkpoint = None,
|
| 209 |
+
hub_model_id = None,
|
| 210 |
+
hub_strategy = 'every_save',
|
| 211 |
+
hub_token = None,
|
| 212 |
+
hub_private_repo = None,
|
| 213 |
+
hub_always_push = False,
|
| 214 |
+
gradient_checkpointing = False,
|
| 215 |
+
gradient_checkpointing_kwargs = None,
|
| 216 |
+
include_inputs_for_metrics = False,
|
| 217 |
+
eval_do_concat_batches = True,
|
| 218 |
+
fp16_backend = 'auto',
|
| 219 |
+
push_to_hub_model_id = None,
|
| 220 |
+
push_to_hub_organization = None,
|
| 221 |
+
push_to_hub_token = None,
|
| 222 |
+
mp_parameters = '',
|
| 223 |
+
auto_find_batch_size = False,
|
| 224 |
+
full_determinism = False,
|
| 225 |
+
torchdynamo = None,
|
| 226 |
+
ray_scope = 'last',
|
| 227 |
+
ddp_timeout = 1800,
|
| 228 |
+
torch_compile = False,
|
| 229 |
+
torch_compile_backend = None,
|
| 230 |
+
torch_compile_mode = None,
|
| 231 |
+
include_tokens_per_second = False,
|
| 232 |
+
include_num_input_tokens_seen = False,
|
| 233 |
+
neftune_noise_alpha = None,
|
| 234 |
+
optim_target_modules = None,
|
| 235 |
+
batch_eval_metrics = False,
|
| 236 |
+
eval_on_start = False,
|
| 237 |
+
use_liger_kernel = False,
|
| 238 |
+
eval_use_gather_object = False,
|
| 239 |
+
average_tokens_across_devices = False,
|
| 240 |
+
max_length = 1024,
|
| 241 |
+
max_prompt_length = 512,
|
| 242 |
+
max_completion_length = None,
|
| 243 |
+
beta = 0.1,
|
| 244 |
+
label_smoothing = 0.0,
|
| 245 |
+
loss_type = 'sigmoid',
|
| 246 |
+
disable_dropout = True,
|
| 247 |
+
cpo_alpha = 1.0,
|
| 248 |
+
simpo_gamma = 0.5,
|
| 249 |
+
label_pad_token_id = -100,
|
| 250 |
+
padding_value = None,
|
| 251 |
+
truncation_mode = 'keep_end',
|
| 252 |
+
generate_during_eval = False,
|
| 253 |
+
is_encoder_decoder = None,
|
| 254 |
+
model_init_kwargs = None,
|
| 255 |
+
dataset_num_proc = None,
|
| 256 |
+
vllm_sampling_params = None,
|
| 257 |
+
unsloth_num_chunks = -1,
|
| 258 |
+
**kwargs,
|
| 259 |
+
):
|
| 260 |
+
if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
|
| 261 |
+
if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
|
| 262 |
+
if output_dir is None and save_strategy == 'steps' and save_steps == 500:
|
| 263 |
+
output_dir = 'unsloth_training_checkpoints'
|
| 264 |
+
save_strategy = 'no'
|
| 265 |
+
if dataset_num_proc is None:
|
| 266 |
+
from multiprocessing import cpu_count
|
| 267 |
+
dataset_num_proc = cpu_count()
|
| 268 |
+
|
| 269 |
+
super().__init__(
|
| 270 |
+
output_dir = output_dir,
|
| 271 |
+
overwrite_output_dir = overwrite_output_dir,
|
| 272 |
+
do_train = do_train,
|
| 273 |
+
do_eval = do_eval,
|
| 274 |
+
do_predict = do_predict,
|
| 275 |
+
eval_strategy = eval_strategy,
|
| 276 |
+
prediction_loss_only = prediction_loss_only,
|
| 277 |
+
per_device_train_batch_size = per_device_train_batch_size,
|
| 278 |
+
per_device_eval_batch_size = per_device_eval_batch_size,
|
| 279 |
+
per_gpu_train_batch_size = per_gpu_train_batch_size,
|
| 280 |
+
per_gpu_eval_batch_size = per_gpu_eval_batch_size,
|
| 281 |
+
gradient_accumulation_steps = gradient_accumulation_steps,
|
| 282 |
+
eval_accumulation_steps = eval_accumulation_steps,
|
| 283 |
+
eval_delay = eval_delay,
|
| 284 |
+
torch_empty_cache_steps = torch_empty_cache_steps,
|
| 285 |
+
learning_rate = learning_rate,
|
| 286 |
+
weight_decay = weight_decay,
|
| 287 |
+
adam_beta1 = adam_beta1,
|
| 288 |
+
adam_beta2 = adam_beta2,
|
| 289 |
+
adam_epsilon = adam_epsilon,
|
| 290 |
+
max_grad_norm = max_grad_norm,
|
| 291 |
+
num_train_epochs = num_train_epochs,
|
| 292 |
+
max_steps = max_steps,
|
| 293 |
+
lr_scheduler_type = lr_scheduler_type,
|
| 294 |
+
warmup_ratio = warmup_ratio,
|
| 295 |
+
warmup_steps = warmup_steps,
|
| 296 |
+
log_level = log_level,
|
| 297 |
+
log_level_replica = log_level_replica,
|
| 298 |
+
log_on_each_node = log_on_each_node,
|
| 299 |
+
logging_dir = logging_dir,
|
| 300 |
+
logging_strategy = logging_strategy,
|
| 301 |
+
logging_first_step = logging_first_step,
|
| 302 |
+
logging_steps = logging_steps,
|
| 303 |
+
logging_nan_inf_filter = logging_nan_inf_filter,
|
| 304 |
+
save_strategy = save_strategy,
|
| 305 |
+
save_steps = save_steps,
|
| 306 |
+
save_total_limit = save_total_limit,
|
| 307 |
+
save_safetensors = save_safetensors,
|
| 308 |
+
save_on_each_node = save_on_each_node,
|
| 309 |
+
save_only_model = save_only_model,
|
| 310 |
+
restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
|
| 311 |
+
no_cuda = no_cuda,
|
| 312 |
+
use_cpu = use_cpu,
|
| 313 |
+
use_mps_device = use_mps_device,
|
| 314 |
+
seed = seed,
|
| 315 |
+
data_seed = data_seed,
|
| 316 |
+
jit_mode_eval = jit_mode_eval,
|
| 317 |
+
use_ipex = use_ipex,
|
| 318 |
+
bf16 = bf16,
|
| 319 |
+
fp16 = fp16,
|
| 320 |
+
fp16_opt_level = fp16_opt_level,
|
| 321 |
+
half_precision_backend = half_precision_backend,
|
| 322 |
+
bf16_full_eval = bf16_full_eval,
|
| 323 |
+
fp16_full_eval = fp16_full_eval,
|
| 324 |
+
tf32 = tf32,
|
| 325 |
+
local_rank = local_rank,
|
| 326 |
+
ddp_backend = ddp_backend,
|
| 327 |
+
tpu_num_cores = tpu_num_cores,
|
| 328 |
+
tpu_metrics_debug = tpu_metrics_debug,
|
| 329 |
+
debug = debug,
|
| 330 |
+
dataloader_drop_last = dataloader_drop_last,
|
| 331 |
+
eval_steps = eval_steps,
|
| 332 |
+
dataloader_num_workers = dataloader_num_workers,
|
| 333 |
+
dataloader_prefetch_factor = dataloader_prefetch_factor,
|
| 334 |
+
past_index = past_index,
|
| 335 |
+
run_name = run_name,
|
| 336 |
+
disable_tqdm = disable_tqdm,
|
| 337 |
+
remove_unused_columns = remove_unused_columns,
|
| 338 |
+
label_names = label_names,
|
| 339 |
+
load_best_model_at_end = load_best_model_at_end,
|
| 340 |
+
metric_for_best_model = metric_for_best_model,
|
| 341 |
+
greater_is_better = greater_is_better,
|
| 342 |
+
ignore_data_skip = ignore_data_skip,
|
| 343 |
+
fsdp = fsdp,
|
| 344 |
+
fsdp_min_num_params = fsdp_min_num_params,
|
| 345 |
+
fsdp_config = fsdp_config,
|
| 346 |
+
tp_size = tp_size,
|
| 347 |
+
fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
|
| 348 |
+
accelerator_config = accelerator_config,
|
| 349 |
+
deepspeed = deepspeed,
|
| 350 |
+
label_smoothing_factor = label_smoothing_factor,
|
| 351 |
+
optim = optim,
|
| 352 |
+
optim_args = optim_args,
|
| 353 |
+
adafactor = adafactor,
|
| 354 |
+
group_by_length = group_by_length,
|
| 355 |
+
length_column_name = length_column_name,
|
| 356 |
+
report_to = report_to,
|
| 357 |
+
ddp_find_unused_parameters = ddp_find_unused_parameters,
|
| 358 |
+
ddp_bucket_cap_mb = ddp_bucket_cap_mb,
|
| 359 |
+
ddp_broadcast_buffers = ddp_broadcast_buffers,
|
| 360 |
+
dataloader_pin_memory = dataloader_pin_memory,
|
| 361 |
+
dataloader_persistent_workers = dataloader_persistent_workers,
|
| 362 |
+
skip_memory_metrics = skip_memory_metrics,
|
| 363 |
+
use_legacy_prediction_loop = use_legacy_prediction_loop,
|
| 364 |
+
push_to_hub = push_to_hub,
|
| 365 |
+
resume_from_checkpoint = resume_from_checkpoint,
|
| 366 |
+
hub_model_id = hub_model_id,
|
| 367 |
+
hub_strategy = hub_strategy,
|
| 368 |
+
hub_token = hub_token,
|
| 369 |
+
hub_private_repo = hub_private_repo,
|
| 370 |
+
hub_always_push = hub_always_push,
|
| 371 |
+
gradient_checkpointing = gradient_checkpointing,
|
| 372 |
+
gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
|
| 373 |
+
include_inputs_for_metrics = include_inputs_for_metrics,
|
| 374 |
+
eval_do_concat_batches = eval_do_concat_batches,
|
| 375 |
+
fp16_backend = fp16_backend,
|
| 376 |
+
push_to_hub_model_id = push_to_hub_model_id,
|
| 377 |
+
push_to_hub_organization = push_to_hub_organization,
|
| 378 |
+
push_to_hub_token = push_to_hub_token,
|
| 379 |
+
mp_parameters = mp_parameters,
|
| 380 |
+
auto_find_batch_size = auto_find_batch_size,
|
| 381 |
+
full_determinism = full_determinism,
|
| 382 |
+
torchdynamo = torchdynamo,
|
| 383 |
+
ray_scope = ray_scope,
|
| 384 |
+
ddp_timeout = ddp_timeout,
|
| 385 |
+
torch_compile = torch_compile,
|
| 386 |
+
torch_compile_backend = torch_compile_backend,
|
| 387 |
+
torch_compile_mode = torch_compile_mode,
|
| 388 |
+
include_tokens_per_second = include_tokens_per_second,
|
| 389 |
+
include_num_input_tokens_seen = include_num_input_tokens_seen,
|
| 390 |
+
neftune_noise_alpha = neftune_noise_alpha,
|
| 391 |
+
optim_target_modules = optim_target_modules,
|
| 392 |
+
batch_eval_metrics = batch_eval_metrics,
|
| 393 |
+
eval_on_start = eval_on_start,
|
| 394 |
+
use_liger_kernel = use_liger_kernel,
|
| 395 |
+
eval_use_gather_object = eval_use_gather_object,
|
| 396 |
+
average_tokens_across_devices = average_tokens_across_devices,
|
| 397 |
+
max_length = max_length,
|
| 398 |
+
max_prompt_length = max_prompt_length,
|
| 399 |
+
max_completion_length = max_completion_length,
|
| 400 |
+
beta = beta,
|
| 401 |
+
label_smoothing = label_smoothing,
|
| 402 |
+
loss_type = loss_type,
|
| 403 |
+
disable_dropout = disable_dropout,
|
| 404 |
+
cpo_alpha = cpo_alpha,
|
| 405 |
+
simpo_gamma = simpo_gamma,
|
| 406 |
+
label_pad_token_id = label_pad_token_id,
|
| 407 |
+
padding_value = padding_value,
|
| 408 |
+
truncation_mode = truncation_mode,
|
| 409 |
+
generate_during_eval = generate_during_eval,
|
| 410 |
+
is_encoder_decoder = is_encoder_decoder,
|
| 411 |
+
model_init_kwargs = model_init_kwargs,
|
| 412 |
+
dataset_num_proc = dataset_num_proc,**kwargs)
|
| 413 |
+
self.vllm_sampling_params = vllm_sampling_params
|
| 414 |
+
self.unsloth_num_chunks = unsloth_num_chunks
|
| 415 |
+
pass
|
| 416 |
+
|
| 417 |
+
class _UnslothCPOTrainer(Trainer):
|
| 418 |
+
r""""""
|
| 419 |
+
|
| 420 |
+
_tag_names = ["trl", "cpo"]
|
| 421 |
+
|
| 422 |
+
def __init__(
|
| 423 |
+
self,
|
| 424 |
+
model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
|
| 425 |
+
args: Optional[CPOConfig] = None,
|
| 426 |
+
data_collator: Optional[DataCollator] = None,
|
| 427 |
+
train_dataset: Optional[Dataset] = None,
|
| 428 |
+
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
|
| 429 |
+
processing_class: Optional[
|
| 430 |
+
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
|
| 431 |
+
] = None,
|
| 432 |
+
model_init: Optional[Callable[[], PreTrainedModel]] = None,
|
| 433 |
+
callbacks: Optional[list[TrainerCallback]] = None,
|
| 434 |
+
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
|
| 435 |
+
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
|
| 436 |
+
peft_config: Optional[dict] = None,
|
| 437 |
+
compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
|
| 438 |
+
):
|
| 439 |
+
if args.model_init_kwargs is None:
|
| 440 |
+
model_init_kwargs = {}
|
| 441 |
+
elif not isinstance(model, str):
|
| 442 |
+
raise ValueError("You passed model_kwargs to the CPOTrainer. But your model is already instantiated.")
|
| 443 |
+
else:
|
| 444 |
+
model_init_kwargs = args.model_init_kwargs
|
| 445 |
+
torch_dtype = model_init_kwargs.get("torch_dtype")
|
| 446 |
+
if torch_dtype is not None:
|
| 447 |
+
# Convert to `torch.dtype` if an str is passed
|
| 448 |
+
if isinstance(torch_dtype, str) and torch_dtype != "auto":
|
| 449 |
+
torch_dtype = getattr(torch, torch_dtype)
|
| 450 |
+
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
|
| 451 |
+
raise ValueError(
|
| 452 |
+
f"Invalid `torch_dtype` passed to the CPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
|
| 453 |
+
)
|
| 454 |
+
model_init_kwargs["torch_dtype"] = torch_dtype
|
| 455 |
+
|
| 456 |
+
if isinstance(model, str):
|
| 457 |
+
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
|
| 458 |
+
|
| 459 |
+
# Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
|
| 460 |
+
# has been called in order to properly call autocast if needed.
|
| 461 |
+
self._peft_has_been_casted_to_bf16 = False
|
| 462 |
+
|
| 463 |
+
if not is_peft_available() and peft_config is not None:
|
| 464 |
+
raise ValueError(
|
| 465 |
+
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
|
| 466 |
+
)
|
| 467 |
+
elif is_peft_available() and peft_config is not None:
|
| 468 |
+
# if model is a peft model and we have a peft_config, we merge and unload it first
|
| 469 |
+
if isinstance(model, PeftModel):
|
| 470 |
+
model = model.merge_and_unload()
|
| 471 |
+
|
| 472 |
+
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
|
| 473 |
+
_support_gc_kwargs = hasattr(
|
| 474 |
+
args, "gradient_checkpointing_kwargs"
|
| 475 |
+
) and "gradient_checkpointing_kwargs" in list(
|
| 476 |
+
inspect.signature(prepare_model_for_kbit_training).parameters
|
| 477 |
+
)
|
| 478 |
+
|
| 479 |
+
prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
|
| 480 |
+
|
| 481 |
+
if _support_gc_kwargs:
|
| 482 |
+
prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
|
| 483 |
+
|
| 484 |
+
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
|
| 485 |
+
elif getattr(args, "gradient_checkpointing", False):
|
| 486 |
+
# For backward compatibility with older versions of transformers
|
| 487 |
+
if hasattr(model, "enable_input_require_grads"):
|
| 488 |
+
model.enable_input_require_grads()
|
| 489 |
+
else:
|
| 490 |
+
|
| 491 |
+
def make_inputs_require_grad(module, input, output):
|
| 492 |
+
output.requires_grad_(True)
|
| 493 |
+
|
| 494 |
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
| 495 |
+
|
| 496 |
+
# get peft model with the given config
|
| 497 |
+
model = model
|
| 498 |
+
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
|
| 499 |
+
peft_module_casting_to_bf16(model)
|
| 500 |
+
# If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
|
| 501 |
+
self._peft_has_been_casted_to_bf16 = True
|
| 502 |
+
|
| 503 |
+
# For models that use gradient_checkpointing, we need to attach a hook that enables input
|
| 504 |
+
# to explicitly have `requires_grad=True`, otherwise training will either silently
|
| 505 |
+
# fail or completely fail.
|
| 506 |
+
elif getattr(args, "gradient_checkpointing", False):
|
| 507 |
+
# For backward compatibility with older versions of transformers
|
| 508 |
+
if hasattr(model, "enable_input_require_grads"):
|
| 509 |
+
model.enable_input_require_grads()
|
| 510 |
+
else:
|
| 511 |
+
|
| 512 |
+
def make_inputs_require_grad(module, input, output):
|
| 513 |
+
output.requires_grad_(True)
|
| 514 |
+
|
| 515 |
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
| 516 |
+
|
| 517 |
+
if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
|
| 518 |
+
raise ValueError(
|
| 519 |
+
"`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
|
| 520 |
+
" Please install `wandb` or `comet-ml` to resolve."
|
| 521 |
+
)
|
| 522 |
+
|
| 523 |
+
if model is not None:
|
| 524 |
+
self.is_encoder_decoder = model.config.is_encoder_decoder
|
| 525 |
+
elif args.is_encoder_decoder is None:
|
| 526 |
+
raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
|
| 527 |
+
else:
|
| 528 |
+
self.is_encoder_decoder = args.is_encoder_decoder
|
| 529 |
+
|
| 530 |
+
if self.is_encoder_decoder:
|
| 531 |
+
self.decoder_start_token_id = model.config.decoder_start_token_id
|
| 532 |
+
self.pad_token_id = model.config.pad_token_id
|
| 533 |
+
|
| 534 |
+
if processing_class is None:
|
| 535 |
+
raise ValueError("processing_class must be specified to tokenize a CPO dataset.")
|
| 536 |
+
if args.max_length is None:
|
| 537 |
+
warnings.warn(
|
| 538 |
+
"`max_length` is not set in the CPOConfig's init"
|
| 539 |
+
" it will default to `512` by default, but you should do it yourself in the future.",
|
| 540 |
+
UserWarning,
|
| 541 |
+
)
|
| 542 |
+
max_length = 512
|
| 543 |
+
else:
|
| 544 |
+
max_length = args.max_length
|
| 545 |
+
if args.max_prompt_length is None:
|
| 546 |
+
warnings.warn(
|
| 547 |
+
"`max_prompt_length` is not set in the CPOConfig's init"
|
| 548 |
+
" it will default to `128` by default, but you should do it yourself in the future.",
|
| 549 |
+
UserWarning,
|
| 550 |
+
)
|
| 551 |
+
max_prompt_length = 128
|
| 552 |
+
else:
|
| 553 |
+
max_prompt_length = args.max_prompt_length
|
| 554 |
+
|
| 555 |
+
if args.max_completion_length is None and self.is_encoder_decoder:
|
| 556 |
+
warnings.warn(
|
| 557 |
+
"When using an encoder decoder architecture, you should set `max_completion_length` in the CPOConfig's init"
|
| 558 |
+
" it will default to `128` by default, but you should do it yourself in the future.",
|
| 559 |
+
UserWarning,
|
| 560 |
+
)
|
| 561 |
+
max_completion_length = 128
|
| 562 |
+
else:
|
| 563 |
+
max_completion_length = args.max_completion_length
|
| 564 |
+
|
| 565 |
+
if data_collator is None:
|
| 566 |
+
data_collator = DPODataCollatorWithPadding(
|
| 567 |
+
pad_token_id=processing_class.pad_token_id,
|
| 568 |
+
label_pad_token_id=args.label_pad_token_id,
|
| 569 |
+
is_encoder_decoder=self.is_encoder_decoder,
|
| 570 |
+
)
|
| 571 |
+
|
| 572 |
+
if args.remove_unused_columns:
|
| 573 |
+
args.remove_unused_columns = False
|
| 574 |
+
# warn users
|
| 575 |
+
warnings.warn(
|
| 576 |
+
"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments"
|
| 577 |
+
" we have set it for you, but you should do it yourself in the future.",
|
| 578 |
+
UserWarning,
|
| 579 |
+
)
|
| 580 |
+
|
| 581 |
+
self.use_dpo_data_collator = True
|
| 582 |
+
else:
|
| 583 |
+
self.use_dpo_data_collator = False
|
| 584 |
+
|
| 585 |
+
# Disable dropout in the model
|
| 586 |
+
if args.disable_dropout:
|
| 587 |
+
disable_dropout_in_model(model)
|
| 588 |
+
|
| 589 |
+
self.max_length = max_length
|
| 590 |
+
self.generate_during_eval = args.generate_during_eval
|
| 591 |
+
self.label_pad_token_id = args.label_pad_token_id
|
| 592 |
+
self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
|
| 593 |
+
self.max_prompt_length = max_prompt_length
|
| 594 |
+
self.truncation_mode = args.truncation_mode
|
| 595 |
+
self.max_completion_length = max_completion_length
|
| 596 |
+
self.processing_class = processing_class
|
| 597 |
+
|
| 598 |
+
if args.loss_type in ["hinge", "ipo"] and args.label_smoothing > 0:
|
| 599 |
+
warnings.warn(
|
| 600 |
+
f"You are using the {args.loss_type} loss type that does not support label smoothing. The "
|
| 601 |
+
"`label_smoothing` parameter will be ignored. Set `label_smoothing` to `0.0` to remove this warning.",
|
| 602 |
+
UserWarning,
|
| 603 |
+
)
|
| 604 |
+
if args.loss_type == "kto_pair":
|
| 605 |
+
raise ValueError("Support for kto_pair has been removed in CPOTrainer. Please use KTOTrainer.")
|
| 606 |
+
|
| 607 |
+
self.beta = args.beta
|
| 608 |
+
self.label_smoothing = args.label_smoothing
|
| 609 |
+
self.loss_type = args.loss_type
|
| 610 |
+
self.cpo_alpha = args.cpo_alpha
|
| 611 |
+
self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
|
| 612 |
+
self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
|
| 613 |
+
if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
|
| 614 |
+
warnings.warn(
|
| 615 |
+
"You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
|
| 616 |
+
"`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
|
| 617 |
+
"greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
|
| 618 |
+
"loss.",
|
| 619 |
+
UserWarning,
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
if args.loss_type == "simpo":
|
| 623 |
+
self.simpo_gamma = args.simpo_gamma
|
| 624 |
+
|
| 625 |
+
self._stored_metrics = defaultdict(lambda: defaultdict(list))
|
| 626 |
+
|
| 627 |
+
# The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
|
| 628 |
+
# input tensor associated with the key "input_ids". However, in CPO, the sampled data does not include the
|
| 629 |
+
# "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and
|
| 630 |
+
# "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens
|
| 631 |
+
# of the input, floating-point operations will not be computed." To suppress this warning, we set the
|
| 632 |
+
# "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
|
| 633 |
+
# that the warning has already been issued.
|
| 634 |
+
model.warnings_issued["estimate_tokens"] = True
|
| 635 |
+
|
| 636 |
+
# Compute that only on the main process for faster data processing.
|
| 637 |
+
# see: https://github.com/huggingface/trl/pull/1255
|
| 638 |
+
with PartialState().local_main_process_first():
|
| 639 |
+
# Extract the prompt if needed, and apply the chat template if needed
|
| 640 |
+
train_dataset = train_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
|
| 641 |
+
train_dataset = train_dataset.map(
|
| 642 |
+
maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc
|
| 643 |
+
)
|
| 644 |
+
if eval_dataset is not None:
|
| 645 |
+
eval_dataset = eval_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
|
| 646 |
+
eval_dataset = eval_dataset.map(
|
| 647 |
+
maybe_apply_chat_template,
|
| 648 |
+
fn_kwargs={"tokenizer": processing_class},
|
| 649 |
+
num_proc=args.dataset_num_proc,
|
| 650 |
+
)
|
| 651 |
+
|
| 652 |
+
# tokenize the dataset
|
| 653 |
+
train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
|
| 654 |
+
if eval_dataset is not None:
|
| 655 |
+
eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
|
| 656 |
+
|
| 657 |
+
super().__init__(
|
| 658 |
+
model=model,
|
| 659 |
+
args=args,
|
| 660 |
+
data_collator=data_collator,
|
| 661 |
+
train_dataset=train_dataset,
|
| 662 |
+
eval_dataset=eval_dataset,
|
| 663 |
+
processing_class=processing_class,
|
| 664 |
+
model_init=model_init,
|
| 665 |
+
compute_metrics=compute_metrics,
|
| 666 |
+
callbacks=callbacks,
|
| 667 |
+
optimizers=optimizers,
|
| 668 |
+
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
|
| 669 |
+
)
|
| 670 |
+
|
| 671 |
+
# Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
|
| 672 |
+
# model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
|
| 673 |
+
# self.model_accepts_loss_kwargs to False to enable scaling.
|
| 674 |
+
self.model_accepts_loss_kwargs = False
|
| 675 |
+
|
| 676 |
+
# Add tags for models that have been loaded with the correct transformers version
|
| 677 |
+
if hasattr(self.model, "add_model_tags"):
|
| 678 |
+
self.model.add_model_tags(self._tag_names)
|
| 679 |
+
|
| 680 |
+
if not hasattr(self, "accelerator"):
|
| 681 |
+
raise AttributeError(
|
| 682 |
+
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
|
| 683 |
+
)
|
| 684 |
+
|
| 685 |
+
def build_tokenized_answer(self, prompt, answer):
|
| 686 |
+
"""
|
| 687 |
+
Llama tokenizer does satisfy `enc(a + b) = enc(a) + enc(b)`.
|
| 688 |
+
It does ensure `enc(a + b) = enc(a) + enc(a + b)[len(enc(a)):]`.
|
| 689 |
+
Reference:
|
| 690 |
+
https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
|
| 691 |
+
"""
|
| 692 |
+
|
| 693 |
+
full_tokenized = self.processing_class(prompt + answer, add_special_tokens=False)
|
| 694 |
+
prompt_input_ids = self.processing_class(prompt, add_special_tokens=False)["input_ids"]
|
| 695 |
+
|
| 696 |
+
answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :]
|
| 697 |
+
answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :]
|
| 698 |
+
|
| 699 |
+
# Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
|
| 700 |
+
full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids])
|
| 701 |
+
|
| 702 |
+
# Prepare input tokens for token by token comparison
|
| 703 |
+
full_input_ids = np.array(full_tokenized["input_ids"])
|
| 704 |
+
|
| 705 |
+
if len(full_input_ids) != len(full_concat_input_ids):
|
| 706 |
+
raise ValueError("Prompt input ids and answer input ids should have the same length.")
|
| 707 |
+
|
| 708 |
+
# On some tokenizers, like Llama-2 tokenizer, there are occasions where tokens
|
| 709 |
+
# can be merged together when tokenizing prompt+answer. This could result
|
| 710 |
+
# on the last token from the prompt being different when tokenized on its own
|
| 711 |
+
# vs when done as prompt+answer.
|
| 712 |
+
response_token_ids_start_idx = len(prompt_input_ids)
|
| 713 |
+
|
| 714 |
+
# If tokenized prompt is different than both prompt+answer, then it means the
|
| 715 |
+
# last token has changed due to merging.
|
| 716 |
+
if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]:
|
| 717 |
+
response_token_ids_start_idx -= 1
|
| 718 |
+
|
| 719 |
+
prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx]
|
| 720 |
+
prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx]
|
| 721 |
+
|
| 722 |
+
if len(prompt_input_ids) != len(prompt_attention_mask):
|
| 723 |
+
raise ValueError("Prompt input ids and attention mask should have the same length.")
|
| 724 |
+
|
| 725 |
+
answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:]
|
| 726 |
+
answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:]
|
| 727 |
+
|
| 728 |
+
return dict(
|
| 729 |
+
prompt_input_ids=prompt_input_ids,
|
| 730 |
+
prompt_attention_mask=prompt_attention_mask,
|
| 731 |
+
input_ids=answer_input_ids,
|
| 732 |
+
attention_mask=answer_attention_mask,
|
| 733 |
+
)
|
| 734 |
+
|
| 735 |
+
def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]] = None) -> dict:
|
| 736 |
+
"""Tokenize a single row from a CPO specific dataset.
|
| 737 |
+
|
| 738 |
+
At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
|
| 739 |
+
in case the prompt + chosen or prompt + rejected responses is/are too long. First
|
| 740 |
+
we truncate the prompt; if we're still too long, we truncate the chosen/rejected.
|
| 741 |
+
|
| 742 |
+
We also create the labels for the chosen/rejected responses, which are of length equal to
|
| 743 |
+
the sum of the length of the prompt and the chosen/rejected response, with
|
| 744 |
+
label_pad_token_id for the prompt tokens.
|
| 745 |
+
"""
|
| 746 |
+
batch = {}
|
| 747 |
+
prompt = feature["prompt"]
|
| 748 |
+
chosen = feature["chosen"]
|
| 749 |
+
rejected = feature["rejected"]
|
| 750 |
+
|
| 751 |
+
if not self.is_encoder_decoder:
|
| 752 |
+
# Check issues below for more details
|
| 753 |
+
# 1. https://github.com/huggingface/trl/issues/907
|
| 754 |
+
# 2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
|
| 755 |
+
# 3. https://github.com/LianjiaTech/BELLE/issues/337
|
| 756 |
+
|
| 757 |
+
if not isinstance(prompt, str):
|
| 758 |
+
raise ValueError(f"prompt should be an str but got {type(prompt)}")
|
| 759 |
+
prompt_tokens = self.processing_class(prompt, add_special_tokens=False)
|
| 760 |
+
prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()}
|
| 761 |
+
|
| 762 |
+
if not isinstance(chosen, str):
|
| 763 |
+
raise ValueError(f"chosen should be an str but got {type(chosen)}")
|
| 764 |
+
chosen_tokens = self.build_tokenized_answer(prompt, chosen)
|
| 765 |
+
|
| 766 |
+
if not isinstance(rejected, str):
|
| 767 |
+
raise ValueError(f"rejected should be an str but got {type(rejected)}")
|
| 768 |
+
rejected_tokens = self.build_tokenized_answer(prompt, rejected)
|
| 769 |
+
|
| 770 |
+
# Last prompt token might get merged by tokenizer and
|
| 771 |
+
# it should not be included for generation if that happens
|
| 772 |
+
prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"])
|
| 773 |
+
|
| 774 |
+
chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"])
|
| 775 |
+
rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"])
|
| 776 |
+
prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids)
|
| 777 |
+
|
| 778 |
+
for k, v in prompt_tokens.items():
|
| 779 |
+
prompt_tokens[k] = v[:prompt_len_input_ids]
|
| 780 |
+
|
| 781 |
+
# Make sure prompts only have one different token at most an
|
| 782 |
+
# and length only differs by 1 at most
|
| 783 |
+
num_diff_tokens = sum(
|
| 784 |
+
[a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])]
|
| 785 |
+
)
|
| 786 |
+
num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids)
|
| 787 |
+
if num_diff_tokens > 1 or num_diff_len > 1:
|
| 788 |
+
raise ValueError(
|
| 789 |
+
"Chosen and rejected prompt_input_ids might only differ on the "
|
| 790 |
+
"last token due to tokenizer merge ops."
|
| 791 |
+
)
|
| 792 |
+
|
| 793 |
+
# add BOS token to head of prompt. Avoid adding if it's already there
|
| 794 |
+
prompt_tokens, chosen_tokens, rejected_tokens = add_bos_token_if_needed(
|
| 795 |
+
self.processing_class.bos_token_id,
|
| 796 |
+
prompt_len_input_ids,
|
| 797 |
+
prompt_tokens,
|
| 798 |
+
chosen_prompt_len_input_ids,
|
| 799 |
+
chosen_tokens,
|
| 800 |
+
rejected_prompt_len_input_ids,
|
| 801 |
+
rejected_tokens,
|
| 802 |
+
)
|
| 803 |
+
|
| 804 |
+
# add EOS token to end of answer. Avoid adding if it's already there
|
| 805 |
+
chosen_tokens, rejected_tokens = add_eos_token_if_needed(
|
| 806 |
+
self.processing_class.eos_token_id, chosen_tokens, rejected_tokens
|
| 807 |
+
)
|
| 808 |
+
|
| 809 |
+
longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))
|
| 810 |
+
|
| 811 |
+
# if combined sequence is too long, truncate the prompt
|
| 812 |
+
for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
|
| 813 |
+
if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
|
| 814 |
+
if self.truncation_mode == "keep_start":
|
| 815 |
+
for k in ["prompt_input_ids", "prompt_attention_mask"]:
|
| 816 |
+
answer_tokens[k] = answer_tokens[k][: self.max_prompt_length]
|
| 817 |
+
elif self.truncation_mode == "keep_end":
|
| 818 |
+
for k in ["prompt_input_ids", "prompt_attention_mask"]:
|
| 819 |
+
answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :]
|
| 820 |
+
else:
|
| 821 |
+
raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")
|
| 822 |
+
|
| 823 |
+
# if that's still too long, truncate the response
|
| 824 |
+
for answer_tokens in [chosen_tokens, rejected_tokens]:
|
| 825 |
+
if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
|
| 826 |
+
for k in ["input_ids", "attention_mask"]:
|
| 827 |
+
answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length]
|
| 828 |
+
|
| 829 |
+
# Create labels
|
| 830 |
+
chosen_sequence_tokens = {
|
| 831 |
+
k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"]
|
| 832 |
+
}
|
| 833 |
+
rejected_sequence_tokens = {
|
| 834 |
+
k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"]
|
| 835 |
+
}
|
| 836 |
+
chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
|
| 837 |
+
chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [
|
| 838 |
+
self.label_pad_token_id
|
| 839 |
+
] * len(chosen_tokens["prompt_input_ids"])
|
| 840 |
+
rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
|
| 841 |
+
rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [
|
| 842 |
+
self.label_pad_token_id
|
| 843 |
+
] * len(rejected_tokens["prompt_input_ids"])
|
| 844 |
+
|
| 845 |
+
for k, toks in {
|
| 846 |
+
"chosen_": chosen_sequence_tokens,
|
| 847 |
+
"rejected_": rejected_sequence_tokens,
|
| 848 |
+
"": prompt_tokens,
|
| 849 |
+
}.items():
|
| 850 |
+
for type_key, tokens in toks.items():
|
| 851 |
+
if type_key == "token_type_ids":
|
| 852 |
+
continue
|
| 853 |
+
batch[f"{k}{type_key}"] = tokens
|
| 854 |
+
|
| 855 |
+
else:
|
| 856 |
+
chosen_tokens = self.processing_class(
|
| 857 |
+
chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
|
| 858 |
+
)
|
| 859 |
+
rejected_tokens = self.processing_class(
|
| 860 |
+
rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
|
| 861 |
+
)
|
| 862 |
+
prompt_tokens = self.processing_class(
|
| 863 |
+
prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True
|
| 864 |
+
)
|
| 865 |
+
|
| 866 |
+
batch["chosen_labels"] = chosen_tokens["input_ids"]
|
| 867 |
+
batch["rejected_labels"] = rejected_tokens["input_ids"]
|
| 868 |
+
batch["prompt_input_ids"] = prompt_tokens["input_ids"]
|
| 869 |
+
batch["prompt_attention_mask"] = prompt_tokens["attention_mask"]
|
| 870 |
+
|
| 871 |
+
if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
|
| 872 |
+
batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
|
| 873 |
+
labels=torch.tensor(batch["rejected_labels"])
|
| 874 |
+
)
|
| 875 |
+
batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
|
| 876 |
+
labels=torch.tensor(batch["chosen_labels"])
|
| 877 |
+
)
|
| 878 |
+
|
| 879 |
+
return batch
|
| 880 |
+
|
| 881 |
+
@staticmethod
|
| 882 |
+
def concatenated_inputs(
|
| 883 |
+
batch: dict[str, Union[list, torch.LongTensor]],
|
| 884 |
+
is_encoder_decoder: bool = False,
|
| 885 |
+
label_pad_token_id: int = -100,
|
| 886 |
+
padding_value: int = 0,
|
| 887 |
+
device: Optional[torch.device] = None,
|
| 888 |
+
) -> dict[str, torch.LongTensor]:
|
| 889 |
+
"""Concatenate the chosen and rejected inputs into a single tensor.
|
| 890 |
+
|
| 891 |
+
Args:
|
| 892 |
+
batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).
|
| 893 |
+
is_encoder_decoder: Whether the model is an encoder-decoder model.
|
| 894 |
+
label_pad_token_id: The label pad token id.
|
| 895 |
+
padding_value: The padding value to use for the concatenated inputs_ids.
|
| 896 |
+
device: The device for the concatenated inputs.
|
| 897 |
+
|
| 898 |
+
Returns:
|
| 899 |
+
A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
|
| 900 |
+
"""
|
| 901 |
+
concatenated_batch = {}
|
| 902 |
+
|
| 903 |
+
if is_encoder_decoder:
|
| 904 |
+
max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1])
|
| 905 |
+
else:
|
| 906 |
+
max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])
|
| 907 |
+
|
| 908 |
+
for k in batch:
|
| 909 |
+
if k.startswith("chosen") and isinstance(batch[k], torch.Tensor):
|
| 910 |
+
if "labels" in k or is_encoder_decoder:
|
| 911 |
+
pad_value = label_pad_token_id
|
| 912 |
+
elif k.endswith("_input_ids"):
|
| 913 |
+
pad_value = padding_value
|
| 914 |
+
elif k.endswith("_attention_mask"):
|
| 915 |
+
pad_value = 0
|
| 916 |
+
concatenated_key = k.replace("chosen", "concatenated")
|
| 917 |
+
concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
|
| 918 |
+
for k in batch:
|
| 919 |
+
if k.startswith("rejected") and isinstance(batch[k], torch.Tensor):
|
| 920 |
+
if "labels" in k or is_encoder_decoder:
|
| 921 |
+
pad_value = label_pad_token_id
|
| 922 |
+
elif k.endswith("_input_ids"):
|
| 923 |
+
pad_value = padding_value
|
| 924 |
+
elif k.endswith("_attention_mask"):
|
| 925 |
+
pad_value = 0
|
| 926 |
+
concatenated_key = k.replace("rejected", "concatenated")
|
| 927 |
+
concatenated_batch[concatenated_key] = torch.cat(
|
| 928 |
+
(
|
| 929 |
+
concatenated_batch[concatenated_key],
|
| 930 |
+
pad_to_length(batch[k], max_length, pad_value=pad_value),
|
| 931 |
+
),
|
| 932 |
+
dim=0,
|
| 933 |
+
).to(device=device)
|
| 934 |
+
|
| 935 |
+
if is_encoder_decoder:
|
| 936 |
+
concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device)
|
| 937 |
+
concatenated_batch["concatenated_attention_mask"] = (
|
| 938 |
+
batch["prompt_attention_mask"].repeat(2, 1).to(device=device)
|
| 939 |
+
)
|
| 940 |
+
|
| 941 |
+
return concatenated_batch
|
| 942 |
+
|
| 943 |
+
def cpo_loss(
|
| 944 |
+
self,
|
| 945 |
+
policy_chosen_logps: torch.FloatTensor,
|
| 946 |
+
policy_rejected_logps: torch.FloatTensor,
|
| 947 |
+
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
|
| 948 |
+
"""Compute the CPO loss for a batch of policy and reference model log probabilities.
|
| 949 |
+
|
| 950 |
+
Args:
|
| 951 |
+
policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
|
| 952 |
+
policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
|
| 953 |
+
|
| 954 |
+
Returns:
|
| 955 |
+
A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).
|
| 956 |
+
The losses tensor contains the CPO loss for each example in the batch.
|
| 957 |
+
The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
|
| 958 |
+
"""
|
| 959 |
+
logits = (policy_chosen_logps - policy_rejected_logps).to(self.accelerator.device)
|
| 960 |
+
|
| 961 |
+
# The beta is a temperature parameter for the CPO loss, typically something in the range of 0.1 to 0.5.
|
| 962 |
+
# We ignore the reference model as beta -> 0. The label_smoothing parameter encodes our uncertainty about the labels and
|
| 963 |
+
# calculates a conservative CPO loss.
|
| 964 |
+
|
| 965 |
+
if self.loss_type == "simpo":
|
| 966 |
+
gamma_logratios = self.simpo_gamma / self.beta
|
| 967 |
+
logits = logits - gamma_logratios
|
| 968 |
+
# This reduces to Equation 3 from the CPO paper when label_smoothing -> 0.
|
| 969 |
+
losses = (
|
| 970 |
+
-F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
|
| 971 |
+
- F.logsigmoid(-self.beta * logits) * self.label_smoothing
|
| 972 |
+
)
|
| 973 |
+
elif self.loss_type == "sigmoid":
|
| 974 |
+
# This reduces to Equation 3 from the CPO paper when label_smoothing -> 0.
|
| 975 |
+
losses = (
|
| 976 |
+
-F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
|
| 977 |
+
- F.logsigmoid(-self.beta * logits) * self.label_smoothing
|
| 978 |
+
)
|
| 979 |
+
elif self.loss_type == "hinge":
|
| 980 |
+
losses = torch.relu(1 - self.beta * logits)
|
| 981 |
+
elif self.loss_type == "ipo":
|
| 982 |
+
# eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper.
|
| 983 |
+
losses = (logits - 1 / (2 * self.beta)) ** 2
|
| 984 |
+
else:
|
| 985 |
+
raise ValueError(
|
| 986 |
+
f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'simpo']"
|
| 987 |
+
)
|
| 988 |
+
|
| 989 |
+
chosen_rewards = self.beta * (policy_chosen_logps.to(self.accelerator.device)).detach()
|
| 990 |
+
rejected_rewards = self.beta * (policy_rejected_logps.to(self.accelerator.device)).detach()
|
| 991 |
+
|
| 992 |
+
return losses, chosen_rewards, rejected_rewards
|
| 993 |
+
|
| 994 |
+
@staticmethod
|
| 995 |
+
def get_batch_logps(
|
| 996 |
+
logits: torch.FloatTensor,
|
| 997 |
+
labels: torch.LongTensor,
|
| 998 |
+
average_log_prob: bool = False,
|
| 999 |
+
label_pad_token_id: int = -100,
|
| 1000 |
+
is_encoder_decoder: bool = False,
|
| 1001 |
+
) -> torch.FloatTensor:
|
| 1002 |
+
"""Compute the log probabilities of the given labels under the given logits.
|
| 1003 |
+
|
| 1004 |
+
Args:
|
| 1005 |
+
logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
|
| 1006 |
+
labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
|
| 1007 |
+
average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.
|
| 1008 |
+
label_pad_token_id: The label pad token id.
|
| 1009 |
+
is_encoder_decoder: Whether the model is an encoder-decoder model.
|
| 1010 |
+
|
| 1011 |
+
Returns:
|
| 1012 |
+
A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
|
| 1013 |
+
"""
|
| 1014 |
+
if logits.shape[:-1] != labels.shape:
|
| 1015 |
+
raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
|
| 1016 |
+
|
| 1017 |
+
if not is_encoder_decoder:
|
| 1018 |
+
labels = labels[:, 1:].clone()
|
| 1019 |
+
logits = logits[:, :-1, :]
|
| 1020 |
+
loss_mask = labels != label_pad_token_id
|
| 1021 |
+
|
| 1022 |
+
# dummy token; we'll ignore the losses on these tokens later
|
| 1023 |
+
labels[labels == label_pad_token_id] = 0
|
| 1024 |
+
|
| 1025 |
+
per_token_logps = selective_log_softmax(logits, labels)
|
| 1026 |
+
|
| 1027 |
+
if average_log_prob:
|
| 1028 |
+
return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
|
| 1029 |
+
else:
|
| 1030 |
+
return (per_token_logps * loss_mask).sum(-1)
|
| 1031 |
+
|
| 1032 |
+
def concatenated_forward(
    self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.

    We do this to avoid doing two forward passes, because it's faster for FSDP.

    Returns a tuple ``(chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss)``,
    with ``outputs.aux_loss`` appended as a sixth element when ``self.aux_loss_enabled`` is set.
    """
    # Pad and stack chosen + rejected examples into one batch; the chosen rows come first.
    concatenated_batch = self.concatenated_inputs(
        batch,
        is_encoder_decoder=self.is_encoder_decoder,
        label_pad_token_id=self.label_pad_token_id,
        padding_value=self.padding_value,
        device=self.accelerator.device,
    )
    # Number of chosen examples = number of rows in the first half of the concatenation.
    len_chosen = batch["chosen_labels"].shape[0]

    model_kwargs = (
        {
            # Encoder-decoder models need decoder inputs built from the (shifted) labels.
            "decoder_input_ids": self._shift_right(concatenated_batch["concatenated_labels"]),
        }
        if self.is_encoder_decoder
        else {}
    )

    if self.aux_loss_enabled:
        # Ask MoE models to also return router logits so the auxiliary loss can be used.
        model_kwargs["output_router_logits"] = True

    outputs = model(
        concatenated_batch["concatenated_input_ids"],
        attention_mask=concatenated_batch["concatenated_attention_mask"],
        use_cache=False,
        **model_kwargs,
    )
    all_logits = outputs.logits

    def cross_entropy_loss(logits, labels):
        # Standard next-token cross entropy; ignore_index defaults to -100 which
        # matches the label padding produced upstream.
        if not self.is_encoder_decoder:
            # Shift so that tokens < n predict n
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
        # Flatten the tokens
        loss_fct = nn.CrossEntropyLoss()
        logits = logits.view(-1, logits.shape[-1])
        labels = labels.view(-1)
        # Enable model parallelism
        labels = labels.to(logits.device)
        loss = loss_fct(logits, labels)
        return loss

    labels = concatenated_batch["concatenated_labels"].clone()

    if self.cpo_alpha == 0:
        # NLL term disabled: keep a zero tensor so the caller's arithmetic still works.
        nll_loss = torch.tensor(0.0).to(self.accelerator.device)
    else:
        # Behavior-cloning NLL is computed on the chosen half only.
        nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen])

    all_logps = self.get_batch_logps(
        all_logits,
        concatenated_batch["concatenated_labels"],
        # IPO and SimPO variants use length-normalized (average) log probabilities.
        average_log_prob=self.loss_type in ["ipo", "simpo"],
        is_encoder_decoder=self.is_encoder_decoder,
        label_pad_token_id=self.label_pad_token_id,
    )

    # Split the concatenated results back into chosen / rejected halves.
    chosen_logps = all_logps[:len_chosen]
    rejected_logps = all_logps[len_chosen:]

    chosen_logits = all_logits[:len_chosen]
    rejected_logits = all_logits[len_chosen:]

    if self.aux_loss_enabled:
        return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss, outputs.aux_loss)

    return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss)
|
| 1106 |
+
|
| 1107 |
+
def get_batch_loss_metrics(
    self,
    model,
    batch: dict[str, Union[list, torch.LongTensor]],
    train_eval: Literal["train", "eval"] = "train",
):
    """Compute the CPO loss and other metrics for the given batch of inputs for train or test.

    Returns:
        ``(loss, metrics)`` where ``loss`` is a scalar tensor and ``metrics`` maps
        metric names (prefixed with ``eval_`` in eval mode) to Python floats.
    """
    metrics = {}

    # Single forward pass over the concatenated chosen+rejected batch.
    forward_output = self.concatenated_forward(model, batch)
    (
        policy_chosen_logps,
        policy_rejected_logps,
        policy_chosen_logits,
        policy_rejected_logits,
        policy_nll_loss,
    ) = forward_output[:5]
    if self.aux_loss_enabled:
        # Sixth element is the MoE router auxiliary loss (see concatenated_forward).
        aux_loss = forward_output[5]

    losses, chosen_rewards, rejected_rewards = self.cpo_loss(
        policy_chosen_logps,
        policy_rejected_logps,
    )

    # Total loss = preference loss + alpha-weighted NLL (behavior cloning) term.
    loss = losses.mean() + self.cpo_alpha * policy_nll_loss
    reward_accuracies = (chosen_rewards > rejected_rewards).float()

    prefix = "eval_" if train_eval == "eval" else ""
    # Every metric is gathered across processes and reduced to a Python float.
    metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean().item()
    metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean().item()
    metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean().item()
    metrics[f"{prefix}rewards/margins"] = (
        self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean().item()
    )
    metrics[f"{prefix}logps/rejected"] = (
        self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean().item()
    )
    metrics[f"{prefix}logps/chosen"] = (
        self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean().item()
    )
    metrics[f"{prefix}logits/rejected"] = (
        self.accelerator.gather_for_metrics(policy_rejected_logits).detach().mean().item()
    )
    metrics[f"{prefix}logits/chosen"] = (
        self.accelerator.gather_for_metrics(policy_chosen_logits).detach().mean().item()
    )
    metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean().item()

    if self.aux_loss_enabled:
        loss += self.aux_loss_coef * aux_loss

    return loss, metrics
|
| 1160 |
+
|
| 1161 |
+
def compute_loss(
    self,
    model: Union[PreTrainedModel, nn.Module],
    inputs: dict[str, Union[torch.Tensor, Any]],
    return_outputs=False,
    num_items_in_batch=None,
) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
    """Compute the CPO training loss for one batch and record its metrics.

    Returns the scalar loss, or ``(loss, metrics)`` when ``return_outputs`` is True.
    ``num_items_in_batch`` is accepted for Trainer API compatibility and unused here.
    """
    # Autocast only when the PEFT adapter was cast to bf16; otherwise run as-is.
    ctx = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

    with ctx:
        loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train")

    # force log the metrics
    self.store_metrics(metrics, train_eval="train")

    return (loss, metrics) if return_outputs else loss
|
| 1179 |
+
|
| 1180 |
+
def generate_from_model(self, model, batch: dict[str, torch.LongTensor]) -> str:
    """Generate samples from the model and reference model for the given batch of inputs."""

    # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
    # the torch cuda amp context manager as some hidden states are silently casted to full precision.
    ctx = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

    with ctx:
        sampled = model.generate(
            input_ids=batch["prompt_input_ids"],
            attention_mask=batch["prompt_attention_mask"],
            max_length=self.max_length,
            do_sample=True,
            pad_token_id=self.processing_class.pad_token_id,
        )

    # Right-pad all sequences to a common length before decoding.
    sampled = pad_to_length(sampled, self.max_length, self.processing_class.pad_token_id)
    return self.processing_class.batch_decode(sampled, skip_special_tokens=True)
|
| 1200 |
+
|
| 1201 |
+
def prediction_step(
    self,
    model: Union[PreTrainedModel, nn.Module],
    inputs: dict[str, Union[torch.Tensor, Any]],
    prediction_loss_only: bool,
    ignore_keys: Optional[list[str]] = None,
):
    """Run a single evaluation step.

    Args:
        model: The model under evaluation.
        inputs: One collated batch of chosen/rejected inputs.
        prediction_loss_only: When True, only the detached loss is returned.
        ignore_keys: Metric keys to drop from the returned logits; defaults to
            ``model.config.keys_to_ignore_at_inference`` when available.

    Returns:
        ``(loss, logits, labels)`` where ``logits`` holds the mean chosen/rejected
        logits and ``labels`` is a zero placeholder (CPO has no eval labels).
    """
    if ignore_keys is None:
        if hasattr(model, "config"):
            ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
        else:
            ignore_keys = []

    # Autocast only when the PEFT adapter was cast to bf16.
    prediction_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

    with torch.no_grad(), prediction_context_manager:
        loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval")

    # force log the metrics
    self.store_metrics(metrics, train_eval="eval")

    if prediction_loss_only:
        return (loss.detach(), None, None)

    # logits for the chosen and rejected samples from model
    logits_dict = {
        "eval_logits/chosen": metrics["eval_logits/chosen"],
        "eval_logits/rejected": metrics["eval_logits/rejected"],
    }
    # FIX: `metrics` values are plain Python floats (reduced with `.item()` in
    # `get_batch_loss_metrics`), so the previous `v.unsqueeze(dim=0)` /
    # `torch.stack(...)` path raised AttributeError at eval time. Build the
    # tensor directly from the float values instead.
    logits = [v for k, v in logits_dict.items() if k not in ignore_keys]
    logits = torch.tensor(logits, device=self.accelerator.device)
    labels = torch.zeros(logits.shape[0], device=self.accelerator.device)

    return (loss.detach(), logits, labels)
|
| 1235 |
+
|
| 1236 |
+
def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
    """Append each metric value to the per-split running history in ``_stored_metrics``."""
    bucket = self._stored_metrics[train_eval]
    for name, val in metrics.items():
        bucket[name].append(val)
|
| 1239 |
+
|
| 1240 |
+
def evaluation_loop(
    self,
    dataloader: DataLoader,
    description: str,
    prediction_loss_only: Optional[bool] = None,
    ignore_keys: Optional[list[str]] = None,
    metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
    """
    Overriding built-in evaluation loop to store metrics for each batch.
    Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

    Works both with or without labels.
    """

    # Sample and save to game log if requested (for one batch to save time)
    if self.generate_during_eval:
        # Generate random indices within the range of the total number of samples
        num_samples = len(dataloader.dataset)
        random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)

        # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
        random_batch_dataset = dataloader.dataset.select(random_indices)
        random_batch = self.data_collator(random_batch_dataset)
        random_batch = self._prepare_inputs(random_batch)

        policy_output_decoded = self.generate_from_model(self.model, random_batch)

        # Strip the prompt prefix from each decoded sample so only the
        # generated continuation is logged.
        table = pd.DataFrame(
            columns=["Prompt", "Policy"],
            data=[
                [prompt, pol[len(prompt) :]] for prompt, pol in zip(random_batch["prompt"], policy_output_decoded)
            ],
        )
        # Mirror the sample table to whichever trackers are configured.
        if "wandb" in self.args.report_to:
            wandb.log({"game_log": wandb.Table(data=table)})

        if "comet_ml" in self.args.report_to:
            log_table_to_comet_experiment(
                name="game_log.csv",
                table=table,
            )

    # Base evaluation
    initial_output = super().evaluation_loop(
        dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
    )

    return initial_output
|
| 1289 |
+
|
| 1290 |
+
def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
    """
    Log `logs` on the various objects watching training, including stored metrics.

    Args:
        logs (`dict[str, float]`):
            The values to log.
        start_time (`float` or `None`, *optional*, defaults to `None`):
            Start time of the training.
    """
    # logs either has 'loss' or 'eval_loss'
    split = "train" if "loss" in logs else "eval"
    # Fold the averaged stored metrics into this payload, then drop the store.
    for name, history in self._stored_metrics[split].items():
        logs[name] = torch.tensor(history).mean().item()
    del self._stored_metrics[split]

    # transformers >= 4.47 added the `start_time` argument to Trainer.log.
    if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
        return super().log(logs, start_time)
    else:  # transformers<=4.46
        return super().log(logs)
|
| 1311 |
+
|
| 1312 |
+
def _shift_right(self, input_ids):
    """Shift `input_ids` one position to the right, inserting the decoder start token.

    Used to build `decoder_input_ids` from labels for encoder-decoder models;
    any -100 label padding is replaced with the pad token id.
    """
    if self.decoder_start_token_id is None:
        raise ValueError(
            "model.config.decoder_start_token_id has to be defined. It is usually set to the pad_token_id."
        )

    # shift inputs to the right
    if is_torch_fx_proxy(input_ids):
        # Item assignment is not supported natively for proxies.
        shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id)
        shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
    else:
        shifted_input_ids = input_ids.new_zeros(input_ids.shape)
        shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
        shifted_input_ids[..., 0] = self.decoder_start_token_id

    if self.pad_token_id is None:
        raise ValueError("model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id)

    return shifted_input_ids
|
| 1334 |
+
|
| 1335 |
+
def create_model_card(
    self,
    model_name: Optional[str] = None,
    dataset_name: Optional[str] = None,
    tags: Union[str, list[str], None] = None,
):
    """
    Creates a draft of a model card using the information available to the `Trainer`.

    Args:
        model_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the model.
        dataset_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the dataset used for training.
        tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
            Tags to be associated with the model card.
    """
    # Only the main process writes the card.
    if not self.is_world_process_zero():
        return

    # A `_name_or_path` that is not a local directory is assumed to be a hub model id.
    if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
        base_model = self.model.config._name_or_path
    else:
        base_model = None

    tags = tags or []
    if isinstance(tags, str):
        tags = [tags]

    if hasattr(self.model.config, "unsloth_version"):
        tags.append("unsloth")

    # BibTeX entry for the CPO paper, embedded verbatim in the generated card.
    citation = textwrap.dedent("""\
    @inproceedings{xu2024contrastive,
        title = {{Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation}},
        author = {Haoran Xu and Amr Sharaf and Yunmo Chen and Weiting Tan and Lingfeng Shen and Benjamin Van Durme and Kenton Murray and Young Jin Kim},
        year = 2024,
        booktitle = {Forty-first International Conference on Machine Learning, {ICML} 2024, Vienna, Austria, July 21-27, 2024},
        publisher = {OpenReview.net},
        url = {https://openreview.net/forum?id=51iwkioZpn}
    }""")

    model_card = generate_model_card(
        base_model=base_model,
        model_name=model_name,
        hub_model_id=self.hub_model_id,
        dataset_name=dataset_name,
        tags=tags,
        # Tracker URLs are attached only when the corresponding tracker is active.
        wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
        comet_url=get_comet_experiment_url(),
        trainer_name="CPO",
        trainer_citation=citation,
        paper_title="Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation",
        paper_id="2401.08417",
    )
    # Write the card next to the checkpoints.
    model_card.save(os.path.join(self.args.output_dir, "README.md"))
|
| 1391 |
+
class UnslothCPOTrainer(_UnslothCPOTrainer):
    """

    Initialize CPOTrainer.

    Args:
        model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForSequenceClassification`.
        args (`CPOConfig`):
            The CPO config arguments to use for training.
        data_collator (`transformers.DataCollator`):
            The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
            which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
        train_dataset (`datasets.Dataset`):
            The dataset to use for training.
        eval_dataset (`datasets.Dataset`):
            The dataset to use for evaluation.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
        model_init (`Callable[[], transformers.PreTrainedModel]`):
            The model initializer to use for training. If None is specified, the default model initializer will be used.
        callbacks (`list[transformers.TrainerCallback]`):
            The callbacks to use for training.
        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
            The optimizer and scheduler to use for training.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
            The function to use to preprocess the logits before computing the metrics.
        peft_config (`dict`, defaults to `None`):
            The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function to use to compute the metrics. Must take a `EvalPrediction` and return
            a dictionary string to metric values.

    """
    def __init__(
        self,
        model = None,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        model_init = None,
        callbacks = None,
        preprocess_logits_for_metrics = None,
        peft_config = None,
        compute_metrics = None,
        **kwargs
    ):
        # NOTE: this is machine-generated Unsloth patching code; it normalizes the
        # config before delegating to the underlying CPO trainer.
        if args is None: args = UnslothCPOConfig()
        # --- Resolve mixed-precision flags from the env and the model's dtype. ---
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        force_float32 = False
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        # Refuse mismatched precision requests unless float32 is being forced.
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            # Neither flag set: derive fp16/bf16 from the model dtype.
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        # --- Turn on step-based eval when an eval dataset exists but no strategy was set. ---
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                # Older transformers had a gradient-accumulation bug Unsloth works around.
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                    '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            # Shrink the (default) eval batch size to match training to avoid eval OOMs.
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        # --- Keep full-eval precision flags consistent with the training precision. ---
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        # --- Metrics callbacks need real logits, so disable the logit-skipping optimization. ---
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        # --- Propagate the model's max_seq_length onto args when args lacks one. ---
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        # Right padding is required for training; patch every tokenizer-like object we can see.
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
        # --- Swap the data collator to match whether the dataset carries labels. ---
        from unsloth_zoo.vision_utils import UnslothVisionDataCollator
        if not isinstance(data_collator, UnslothVisionDataCollator):
            if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
                data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
            elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
                data_collator = DataCollatorForSeq2Seq(__tokenizer)
        else:
            # Vision collator prepares everything itself; disable dataset preprocessing.
            if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
            if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
            if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Processor wrappers without a `pad` method expose the real tokenizer underneath.
            if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
                if isinstance(data_collator, DataCollatorForSeq2Seq):
                    data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
                else:
                    data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
        other_metrics = []

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('cpo_trainer', other_metrics)

        super().__init__(
            model = model,
            args = args,
            data_collator = data_collator,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            model_init = model_init,
            callbacks = callbacks,
            preprocess_logits_for_metrics = preprocess_logits_for_metrics,
            peft_config = peft_config,
            compute_metrics = compute_metrics,**kwargs)
        # Re-attach NEFTune noise directly on the embeddings instead of via the hook.
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass

pass
|
unsloth_compiled_cache/UnslothDPOTrainer.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
unsloth_compiled_cache/UnslothGKDTrainer.py
ADDED
|
@@ -0,0 +1,857 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.gkd_trainer import (Any, AutoModelForCausalLM, BaseImageProcessor, Callable, DataCollator, DataCollatorForChatML, Dataset, EvalPrediction, F, FeatureExtractionMixin, GKDConfig, GKDTrainer, GenerationConfig, Optional, PeftConfig, PreTrainedModel, PreTrainedModelWrapper, PreTrainedTokenizerBase, ProcessorMixin, SFTTrainer, TrainerCallback, Union, deepcopy, disable_dropout_in_model, empty_cache, generate_model_card, get_comet_experiment_url, is_wandb_available, nn, os, random, textwrap, torch, unwrap_model_for_generation)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
| 25 |
+
torch_compile_options = {
|
| 26 |
+
"epilogue_fusion" : True,
|
| 27 |
+
"max_autotune" : False,
|
| 28 |
+
"shape_padding" : True,
|
| 29 |
+
"trace.enabled" : False,
|
| 30 |
+
"triton.cudagraphs" : False,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
|
| 34 |
+
def selective_log_softmax(logits, index):
|
| 35 |
+
logits = logits.to(torch.float32)
|
| 36 |
+
selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
|
| 37 |
+
# loop to reduce peak mem consumption
|
| 38 |
+
# logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
|
| 39 |
+
logsumexp_values = torch.logsumexp(logits, dim = -1)
|
| 40 |
+
per_token_logps = selected_logits - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
|
| 41 |
+
return per_token_logps
|
| 42 |
+
@dataclass
class UnslothGKDConfig(GKDConfig):
    """
    Configuration class for [`GKDTrainer`].

    Args:
        temperature (`float`, *optional*, defaults to `0.9`):
            Temperature for sampling. The higher the temperature, the more random the completions.
        lmbda (`float`, *optional*, defaults to `0.5`):
            Lambda parameter that controls the student data fraction (i.e., the proportion of on-policy
            student-generated outputs).
        beta (`float`, *optional*, defaults to `0.5`):
            Interpolation coefficient between `0.0` and `1.0` of the Generalized Jensen-Shannon Divergence loss. When
            beta is `0.0`, the loss is the KL divergence. When beta is `1.0`, the loss is the Inverse KL Divergence.
        max_new_tokens (`int`, *optional*, defaults to `128`):
            Maximum number of tokens to generate per completion.
        teacher_model_name_or_path (`str` or `None`, *optional*, defaults to `None`):
            Model name or path of the teacher model. If `None`, the teacher model will be the same as the model
            being trained.
        teacher_model_init_kwargs (`dict[str, Any]]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the teacher model
            from a string.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model.
        seq_kd (`bool`, *optional*, defaults to `False`):
            Seq_kd parameter that controls whether to perform Sequence-Level KD (can be viewed as supervised FT
            on teacher-generated output).
        vllm_sampling_params (`Any` or `None`, *optional*, defaults to `None`):
            Unsloth-only: vLLM `SamplingParams` to use during generation.
        unsloth_num_chunks (`int`, *optional*, defaults to `-1`):
            Unsloth-only: chunk size to reduce memory usage; `-1` is most efficient.
    """
    # Unsloth-only extra fields; these are NOT forwarded to GKDConfig.__init__.
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    def __init__(
        self,
        output_dir = None, overwrite_output_dir = None, do_train = False, do_eval = False,
        do_predict = False, eval_strategy = 'no', prediction_loss_only = False,
        per_device_train_batch_size = 4, per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None, per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2, eval_accumulation_steps = 2, eval_delay = 0,
        torch_empty_cache_steps = 250, learning_rate = 5e-05, weight_decay = 0.01,
        adam_beta1 = 0.9, adam_beta2 = 0.999, adam_epsilon = 1e-08, max_grad_norm = 1.0,
        num_train_epochs = 3.0, max_steps = -1, lr_scheduler_type = 'linear',
        warmup_ratio = 0.1, warmup_steps = 0, log_level = 'passive',
        log_level_replica = 'warning', log_on_each_node = True, logging_dir = None,
        logging_strategy = 'steps', logging_first_step = False, logging_steps = 1,
        logging_nan_inf_filter = False, save_strategy = 'steps', save_steps = 500,
        save_total_limit = None, save_safetensors = True, save_on_each_node = False,
        save_only_model = False, restore_callback_states_from_checkpoint = False,
        no_cuda = False, use_cpu = False, use_mps_device = False, seed = 3407,
        data_seed = 3407, jit_mode_eval = False, use_ipex = False, bf16 = False,
        fp16 = False, fp16_opt_level = 'O1', half_precision_backend = 'auto',
        bf16_full_eval = False, fp16_full_eval = False, tf32 = None, local_rank = -1,
        ddp_backend = None, tpu_num_cores = None, tpu_metrics_debug = False, debug = '',
        dataloader_drop_last = False, eval_steps = None, dataloader_num_workers = 0,
        dataloader_prefetch_factor = None, past_index = -1, run_name = None,
        disable_tqdm = None, remove_unused_columns = True, label_names = None,
        load_best_model_at_end = False, metric_for_best_model = None,
        greater_is_better = None, ignore_data_skip = False, fsdp = '',
        fsdp_min_num_params = 0, fsdp_config = None, tp_size = 0,
        fsdp_transformer_layer_cls_to_wrap = None, accelerator_config = None,
        deepspeed = None, label_smoothing_factor = 0.0, optim = 'adamw_8bit',
        optim_args = None, adafactor = False, group_by_length = False,
        length_column_name = 'length', report_to = None,
        ddp_find_unused_parameters = None, ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None, dataloader_pin_memory = True,
        dataloader_persistent_workers = False, skip_memory_metrics = True,
        use_legacy_prediction_loop = False, push_to_hub = False,
        resume_from_checkpoint = None, hub_model_id = None, hub_strategy = 'every_save',
        hub_token = None, hub_private_repo = None, hub_always_push = False,
        gradient_checkpointing = False, gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False, eval_do_concat_batches = True,
        fp16_backend = 'auto', push_to_hub_model_id = None,
        push_to_hub_organization = None, push_to_hub_token = None, mp_parameters = '',
        auto_find_batch_size = False, full_determinism = False, torchdynamo = None,
        ray_scope = 'last', ddp_timeout = 1800, torch_compile = False,
        torch_compile_backend = None, torch_compile_mode = None,
        include_tokens_per_second = False, include_num_input_tokens_seen = False,
        neftune_noise_alpha = None, optim_target_modules = None,
        batch_eval_metrics = False, eval_on_start = False, use_liger_kernel = False,
        eval_use_gather_object = False, average_tokens_across_devices = False,
        model_init_kwargs = None, use_liger = False, dataset_text_field = 'text',
        dataset_kwargs = None, dataset_num_proc = None, max_seq_length = None,
        packing = False, eval_packing = None, dataset_batch_size = None,
        num_of_sequences = None, chars_per_token = None, temperature = 0.9,
        lmbda = 0.5, beta = 0.5, max_new_tokens = 128,
        teacher_model_name_or_path = None, teacher_model_init_kwargs = None,
        disable_dropout = True, seq_kd = False,
        vllm_sampling_params = None, unsloth_num_chunks = -1,
        **kwargs,
    ):
        """Validate Unsloth-specific constraints, then forward everything to GKDConfig.

        Raises:
            FloatingPointError: if ``learning_rate`` < 1e-7 (updates would vanish).
            OverflowError: if ``learning_rate`` > 1 (updates would explode).
        """
        # Guard against degenerate learning rates before the parent accepts them.
        if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        # Fixed grammar in the user-facing message ("way too larger" -> "way too large").
        if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too large > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        # If the caller kept every checkpointing default, assume no checkpoints are
        # wanted: point output_dir somewhere harmless and disable saving.
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        # Default the dataset preprocessing workers to all available CPUs.
        if dataset_num_proc is None:
            from multiprocessing import cpu_count
            dataset_num_proc = cpu_count()

        # Forward every (possibly adjusted) named argument to GKDConfig, except the
        # two Unsloth-only extras, which are plain attributes on this subclass.
        # Snapshot locals() once so later assignments cannot change the payload.
        _forward = dict(locals())
        for _skip in ('self', 'kwargs', 'vllm_sampling_params', 'unsloth_num_chunks',
                      'cpu_count', '__class__', '_forward'):
            _forward.pop(_skip, None)
        super().__init__(**_forward, **kwargs)
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
    pass
|
| 391 |
+
|
| 392 |
+
class _UnslothGKDTrainer(SFTTrainer):
|
| 393 |
+
_tag_names = ["trl", "gkd"]
|
| 394 |
+
|
| 395 |
+
def __init__(
|
| 396 |
+
self,
|
| 397 |
+
model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
|
| 398 |
+
teacher_model: Union[PreTrainedModel, nn.Module, str] = None,
|
| 399 |
+
args: Optional[GKDConfig] = None,
|
| 400 |
+
data_collator: Optional[DataCollator] = None, # type: ignore
|
| 401 |
+
train_dataset: Optional[Dataset] = None,
|
| 402 |
+
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
|
| 403 |
+
processing_class: Optional[
|
| 404 |
+
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
|
| 405 |
+
] = None,
|
| 406 |
+
compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
|
| 407 |
+
callbacks: Optional[list[TrainerCallback]] = None,
|
| 408 |
+
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
|
| 409 |
+
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
|
| 410 |
+
peft_config: Optional["PeftConfig"] = None,
|
| 411 |
+
formatting_func: Optional[Callable] = None,
|
| 412 |
+
):
|
| 413 |
+
# add remove_unused_columns=False to the dataclass args
|
| 414 |
+
args.remove_unused_columns = False
|
| 415 |
+
data_collator = DataCollatorForChatML(tokenizer=processing_class, max_length=args.max_seq_length)
|
| 416 |
+
|
| 417 |
+
super().__init__(
|
| 418 |
+
model,
|
| 419 |
+
args=args,
|
| 420 |
+
data_collator=data_collator,
|
| 421 |
+
train_dataset=train_dataset,
|
| 422 |
+
eval_dataset=eval_dataset,
|
| 423 |
+
processing_class=processing_class,
|
| 424 |
+
compute_metrics=compute_metrics,
|
| 425 |
+
callbacks=callbacks,
|
| 426 |
+
optimizers=optimizers,
|
| 427 |
+
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
|
| 428 |
+
peft_config=peft_config,
|
| 429 |
+
formatting_func=formatting_func,
|
| 430 |
+
)
|
| 431 |
+
|
| 432 |
+
if args.teacher_model_init_kwargs is None:
|
| 433 |
+
teacher_model_init_kwargs = {}
|
| 434 |
+
elif not isinstance(teacher_model, str):
|
| 435 |
+
raise ValueError(
|
| 436 |
+
"You passed teacher_model_init_kwargs to the GKDConfig, but your teacher_model is already instantiated."
|
| 437 |
+
)
|
| 438 |
+
else:
|
| 439 |
+
teacher_model_init_kwargs = args.teacher_model_init_kwargs
|
| 440 |
+
teacher_model_init_kwargs["torch_dtype"] = (
|
| 441 |
+
teacher_model_init_kwargs["torch_dtype"]
|
| 442 |
+
if teacher_model_init_kwargs["torch_dtype"] in ["auto", None]
|
| 443 |
+
else getattr(torch, teacher_model_init_kwargs["torch_dtype"])
|
| 444 |
+
)
|
| 445 |
+
|
| 446 |
+
if isinstance(teacher_model, str):
|
| 447 |
+
if args.use_liger:
|
| 448 |
+
teacher_model = AutoLigerKernelForCausalLM.from_pretrained(teacher_model, **teacher_model_init_kwargs)
|
| 449 |
+
else:
|
| 450 |
+
teacher_model = AutoModelForCausalLM.from_pretrained(teacher_model, **teacher_model_init_kwargs)
|
| 451 |
+
|
| 452 |
+
# Disable dropout in the model
|
| 453 |
+
if args.disable_dropout:
|
| 454 |
+
disable_dropout_in_model(self.model)
|
| 455 |
+
|
| 456 |
+
if self.is_deepspeed_enabled:
|
| 457 |
+
self.teacher_model = self._prepare_deepspeed(teacher_model)
|
| 458 |
+
else:
|
| 459 |
+
self.teacher_model = self.accelerator.prepare_model(teacher_model, evaluation_mode=True)
|
| 460 |
+
|
| 461 |
+
self.lmbda = args.lmbda
|
| 462 |
+
self.beta = args.beta
|
| 463 |
+
self.temperature = args.temperature
|
| 464 |
+
self.seq_kd = args.seq_kd
|
| 465 |
+
|
| 466 |
+
self.generation_config = GenerationConfig(
|
| 467 |
+
max_new_tokens=args.max_new_tokens,
|
| 468 |
+
temperature=args.temperature,
|
| 469 |
+
do_sample=True,
|
| 470 |
+
top_k=0,
|
| 471 |
+
use_cache=False if args.gradient_checkpointing else True,
|
| 472 |
+
pad_token_id=self.processing_class.pad_token_id,
|
| 473 |
+
)
|
| 474 |
+
# Set custom EOS tokens if they are specified by the model's generation
|
| 475 |
+
# config. This is important for models with the Llama 3 chat template,
|
| 476 |
+
# which use special tokens <|eot_id|> and <|eom_id|> to mark the end of
|
| 477 |
+
# turns or messages.
|
| 478 |
+
if (
|
| 479 |
+
hasattr(self.model.generation_config, "eos_token_id")
|
| 480 |
+
and self.model.generation_config.eos_token_id is not None
|
| 481 |
+
):
|
| 482 |
+
self.generation_config.eos_token_id = self.model.generation_config.eos_token_id
|
| 483 |
+
|
| 484 |
+
def _prepare_dataset(self, dataset, *args):
|
| 485 |
+
# SFTTrainer._prepare_dataset() applies the chat template and rename the messages column to text. However, we
|
| 486 |
+
# need to keep the messages column as it is. We use the following workaround to keep the messages column.
|
| 487 |
+
dataset = dataset.add_column("_messages", dataset["messages"])
|
| 488 |
+
dataset = super()._prepare_dataset(dataset, *args)
|
| 489 |
+
dataset = dataset.rename_column("_messages", "messages")
|
| 490 |
+
return dataset
|
| 491 |
+
|
| 492 |
+
@staticmethod
|
| 493 |
+
def generalized_jsd_loss(
|
| 494 |
+
student_logits, teacher_logits, labels=None, beta=0.5, temperature=1.0, reduction="batchmean"
|
| 495 |
+
):
|
| 496 |
+
"""
|
| 497 |
+
Compute the generalized Jensen-Shannon Divergence loss for knowledge distillation using F.kl_div. See Eq. (1)
|
| 498 |
+
of https://huggingface.co/papers/2306.13649 for the definition.
|
| 499 |
+
|
| 500 |
+
Args:
|
| 501 |
+
student_logits: Tensor of shape (batch_size, sequence_length, vocab_size)
|
| 502 |
+
teacher_logits: Tensor of shape (batch_size, sequence_length, vocab_size)
|
| 503 |
+
labels: Tensor of shape (batch_size, sequence_length) with -100 for padding tokens to ignore when computing loss
|
| 504 |
+
beta: Interpolation coefficient between 0 and 1 (default: 0.5)
|
| 505 |
+
temperature: Softmax temperature (default: 1.0)
|
| 506 |
+
reduction: Specifies the reduction to apply to the output (default: 'batchmean')
|
| 507 |
+
|
| 508 |
+
Returns:
|
| 509 |
+
loss: Scalar tensor with the generalized JSD loss
|
| 510 |
+
"""
|
| 511 |
+
|
| 512 |
+
# Apply temperature scaling
|
| 513 |
+
student_logits = student_logits / temperature
|
| 514 |
+
teacher_logits = teacher_logits / temperature
|
| 515 |
+
|
| 516 |
+
# Compute log probabilities for student and probabilities for teacher
|
| 517 |
+
student_log_probs = F.log_softmax(student_logits, dim=-1)
|
| 518 |
+
teacher_log_probs = F.log_softmax(teacher_logits, dim=-1)
|
| 519 |
+
|
| 520 |
+
# Compute the log of the mixture distribution
|
| 521 |
+
# log(a + b) = log(exp(log(a)) + exp(log(b))) -> for mixture
|
| 522 |
+
beta = torch.tensor(beta, dtype=student_log_probs.dtype)
|
| 523 |
+
mixture_log_probs = torch.logsumexp(
|
| 524 |
+
torch.stack([student_log_probs + torch.log(beta), teacher_log_probs + torch.log(1 - beta)]),
|
| 525 |
+
dim=0,
|
| 526 |
+
)
|
| 527 |
+
|
| 528 |
+
# Compute KL divergences using F.kl_div
|
| 529 |
+
# PyTorch differs from the standard mathematical definition, so the order of the probability distributions is swapped compared to that defined in the paper.
|
| 530 |
+
kl_teacher = F.kl_div(mixture_log_probs, teacher_log_probs, reduction="none", log_target=True)
|
| 531 |
+
kl_student = F.kl_div(mixture_log_probs, student_log_probs, reduction="none", log_target=True)
|
| 532 |
+
|
| 533 |
+
# Compute the Generalized Jensen-Shannon Divergence
|
| 534 |
+
jsd = beta * kl_teacher + (1 - beta) * kl_student
|
| 535 |
+
|
| 536 |
+
# Masking
|
| 537 |
+
if labels is not None:
|
| 538 |
+
mask = labels != -100
|
| 539 |
+
jsd = jsd[mask]
|
| 540 |
+
|
| 541 |
+
# Apply reduction
|
| 542 |
+
if reduction == "batchmean":
|
| 543 |
+
return jsd.sum() / mask.sum() if labels is not None else jsd.sum() / (jsd.size(0) * jsd.size(1))
|
| 544 |
+
elif reduction == "sum":
|
| 545 |
+
return jsd.sum()
|
| 546 |
+
elif reduction == "mean":
|
| 547 |
+
return jsd.mean()
|
| 548 |
+
else:
|
| 549 |
+
return jsd
|
| 550 |
+
|
| 551 |
+
def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
|
| 552 |
+
# compute student output
|
| 553 |
+
outputs_student = model(
|
| 554 |
+
input_ids=inputs["input_ids"],
|
| 555 |
+
attention_mask=inputs["attention_mask"],
|
| 556 |
+
)
|
| 557 |
+
|
| 558 |
+
# compute teacher output in eval mode
|
| 559 |
+
self.teacher_model.eval()
|
| 560 |
+
with torch.no_grad():
|
| 561 |
+
outputs_teacher = self.teacher_model(
|
| 562 |
+
input_ids=inputs["input_ids"],
|
| 563 |
+
attention_mask=inputs["attention_mask"],
|
| 564 |
+
)
|
| 565 |
+
|
| 566 |
+
# slice the logits for the generated tokens using the inputs["prompts"] lengths
|
| 567 |
+
prompt_lengths = inputs["prompts"].shape[1]
|
| 568 |
+
shifted_student_logits = outputs_student.logits[:, prompt_lengths - 1 : -1, :]
|
| 569 |
+
shifted_teacher_logits = outputs_teacher.logits[:, prompt_lengths - 1 : -1, :]
|
| 570 |
+
shifted_labels = inputs["labels"][:, prompt_lengths:]
|
| 571 |
+
|
| 572 |
+
# compute loss
|
| 573 |
+
loss = self.generalized_jsd_loss(
|
| 574 |
+
student_logits=shifted_student_logits,
|
| 575 |
+
teacher_logits=shifted_teacher_logits,
|
| 576 |
+
labels=shifted_labels,
|
| 577 |
+
beta=self.beta,
|
| 578 |
+
)
|
| 579 |
+
|
| 580 |
+
# empty cache
|
| 581 |
+
empty_cache()
|
| 582 |
+
|
| 583 |
+
# Return loss
|
| 584 |
+
return (loss, outputs_student) if return_outputs else loss
|
| 585 |
+
|
| 586 |
+
@staticmethod
|
| 587 |
+
def generate_on_policy_outputs(model, inputs, generation_config, pad_token_id=None):
|
| 588 |
+
# Generate output with respect to the prompt only
|
| 589 |
+
generated_outputs = model.generate(
|
| 590 |
+
input_ids=inputs["prompts"],
|
| 591 |
+
attention_mask=inputs.get("prompt_attention_mask", None),
|
| 592 |
+
generation_config=generation_config,
|
| 593 |
+
return_dict_in_generate=True,
|
| 594 |
+
)
|
| 595 |
+
|
| 596 |
+
# Get the generated token IDs
|
| 597 |
+
generated_tokens = generated_outputs.sequences
|
| 598 |
+
# Calculate new attention mask
|
| 599 |
+
new_attention_mask = torch.ones_like(generated_tokens)
|
| 600 |
+
new_labels = generated_tokens.clone()
|
| 601 |
+
|
| 602 |
+
# If there's pad_token_id, set attention mask to 0 for padding tokens
|
| 603 |
+
if pad_token_id is not None:
|
| 604 |
+
new_labels[new_labels == pad_token_id] = -100
|
| 605 |
+
new_attention_mask[generated_tokens == pad_token_id] = 0
|
| 606 |
+
|
| 607 |
+
return generated_tokens, new_attention_mask, new_labels
|
| 608 |
+
|
| 609 |
+
def training_step(
|
| 610 |
+
self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None
|
| 611 |
+
) -> torch.Tensor:
|
| 612 |
+
"""
|
| 613 |
+
Perform a training step for the Generalized Knowledge Distillation (GKD) model.
|
| 614 |
+
|
| 615 |
+
This method implements the on-policy learning approach described in the GKD paper.
|
| 616 |
+
With probability `self.lmbda`, it generates new responses using the student model,
|
| 617 |
+
which are then used for training instead of the original inputs.
|
| 618 |
+
"""
|
| 619 |
+
if self.seq_kd:
|
| 620 |
+
with unwrap_model_for_generation(self.teacher_model, self.accelerator) as unwrapped_model:
|
| 621 |
+
new_input_ids, new_attention_mask, new_labels = self.generate_on_policy_outputs(
|
| 622 |
+
unwrapped_model, inputs, self.generation_config, self.processing_class.pad_token_id
|
| 623 |
+
)
|
| 624 |
+
inputs["input_ids"] = new_input_ids
|
| 625 |
+
inputs["attention_mask"] = new_attention_mask
|
| 626 |
+
inputs["labels"] = new_labels
|
| 627 |
+
if random.random() <= self.lmbda:
|
| 628 |
+
with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model:
|
| 629 |
+
new_input_ids, new_attention_mask, new_labels = self.generate_on_policy_outputs(
|
| 630 |
+
unwrapped_model, inputs, self.generation_config, self.processing_class.pad_token_id
|
| 631 |
+
)
|
| 632 |
+
inputs["input_ids"] = new_input_ids
|
| 633 |
+
inputs["attention_mask"] = new_attention_mask
|
| 634 |
+
inputs["labels"] = new_labels
|
| 635 |
+
|
| 636 |
+
loss = super().training_step(model, inputs, num_items_in_batch)
|
| 637 |
+
return loss
|
| 638 |
+
|
| 639 |
+
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
|
| 640 |
+
# Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
|
| 641 |
+
deepspeed_plugin = self.accelerator.state.deepspeed_plugin
|
| 642 |
+
config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)
|
| 643 |
+
|
| 644 |
+
if model is not None:
|
| 645 |
+
if hasattr(model, "config"):
|
| 646 |
+
hidden_size = (
|
| 647 |
+
max(model.config.hidden_sizes)
|
| 648 |
+
if getattr(model.config, "hidden_sizes", None)
|
| 649 |
+
else getattr(model.config, "hidden_size", None)
|
| 650 |
+
)
|
| 651 |
+
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
|
| 652 |
+
# Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
|
| 653 |
+
# This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
|
| 654 |
+
config_kwargs.update(
|
| 655 |
+
{
|
| 656 |
+
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
|
| 657 |
+
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
|
| 658 |
+
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
|
| 659 |
+
}
|
| 660 |
+
)
|
| 661 |
+
|
| 662 |
+
# If ZeRO-3 is used, we shard both the active and reference model.
|
| 663 |
+
# Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
|
| 664 |
+
if config_kwargs["zero_optimization"]["stage"] != 3:
|
| 665 |
+
config_kwargs["zero_optimization"]["stage"] = 0
|
| 666 |
+
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
|
| 667 |
+
model.eval()
|
| 668 |
+
return model
|
| 669 |
+
|
| 670 |
+
def create_model_card(
    self,
    model_name: Optional[str] = None,
    dataset_name: Optional[str] = None,
    tags: Union[str, list[str], None] = None,
):
    """
    Creates a draft of a model card using the information available to the `Trainer`.

    Only runs on the main process; writes `README.md` into `args.output_dir`.

    Args:
        model_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the model.
        dataset_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the dataset used for training.
        tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
            Tags to be associated with the model card.
    """
    if not self.is_world_process_zero():
        return

    # A `_name_or_path` that is not a local directory is taken to be a Hub model id.
    if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
        base_model = self.model.config._name_or_path
    else:
        base_model = None

    tags = tags or []
    if isinstance(tags, str):
        tags = [tags]

    # Models patched by Unsloth carry `unsloth_version` on their config.
    if hasattr(self.model.config, "unsloth_version"):
        tags.append("unsloth")

    citation = textwrap.dedent("""\
    @inproceedings{agarwal2024on-policy,
        title        = {{On-Policy Distillation of Language Models: Learning from Self-Generated Mistakes}},
        author       = {Rishabh Agarwal and Nino Vieillard and Yongchao Zhou and Piotr Stanczyk and Sabela Ramos Garea and Matthieu Geist and Olivier Bachem},
        year         = 2024,
        booktitle    = {The Twelfth International Conference on Learning Representations, {ICLR} 2024, Vienna, Austria, May 7-11, 2024},
        publisher    = {OpenReview.net},
        url          = {https://openreview.net/forum?id=3zKtaqxLhW},
    }""")

    model_card = generate_model_card(
        base_model=base_model,
        model_name=model_name,
        hub_model_id=self.hub_model_id,
        dataset_name=dataset_name,
        tags=tags,
        # Only attach a tracking URL when a live wandb run exists.
        wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
        comet_url=get_comet_experiment_url(),
        trainer_name="GKD",
        trainer_citation=citation,
        paper_title="On-Policy Distillation of Language Models: Learning from Self-Generated Mistakes",
        paper_id="2306.13649",
    )

    model_card.save(os.path.join(self.args.output_dir, "README.md"))
|
| 727 |
+
class UnslothGKDTrainer(_UnslothGKDTrainer):
    """Unsloth-patched GKD trainer.

    Thin wrapper around `_UnslothGKDTrainer` whose `__init__` normalizes
    mixed-precision flags, evaluation settings, sequence length, tokenizer
    padding side and the data collator before delegating to the parent
    constructor. This class is auto-generated; the statement order below is
    deliberate and should not be rearranged.
    """
    def __init__(
        self,
        model = None,
        teacher_model = None,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        compute_metrics = None,
        callbacks = None,
        preprocess_logits_for_metrics = None,
        peft_config = None,
        formatting_func = None,
        **kwargs
    ):
        if args is None: args = UnslothGKDConfig()
        # --- Mixed precision reconciliation -------------------------------
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        force_float32 = False
        # Environment override: some models cannot train in reduced precision.
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        # The requested mixed-precision mode must match the model's weight dtype.
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            # Neither flag set: derive fp16/bf16 from the model's own dtype.
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        # --- Evaluation defaults ------------------------------------------
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            # Older transformers had a gradient-accumulation loss-scaling bug.
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            # Shrink the (default) eval batch size down to the train batch size.
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        # Keep full-eval precision consistent with the training precision.
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        # --- Logits materialization ---------------------------------------
        # Metrics callbacks need real logits, so disable Unsloth's logit skipping.
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        # --- Max sequence length propagation ------------------------------
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        # Right padding is required for training with Unsloth-patched models.
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        # --- Data collator fix-ups ----------------------------------------
        __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
        from unsloth_zoo.vision_utils import UnslothVisionDataCollator
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Swap the collator so it matches whether the dataset carries labels.
            if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
                data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
            elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
                data_collator = DataCollatorForSeq2Seq(__tokenizer)
        else:
            # Vision collator prepares everything itself; stop TRL re-processing it.
            if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
            if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
            if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Processor wrappers lacking `.pad` delegate to their inner tokenizer.
            if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
                if isinstance(data_collator, DataCollatorForSeq2Seq):
                    data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
                else:
                    data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
        other_metrics = []

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('gkd_trainer', other_metrics)

        super().__init__(
            model = model,
            teacher_model = teacher_model,
            args = args,
            data_collator = data_collator,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            compute_metrics = compute_metrics,
            callbacks = callbacks,
            preprocess_logits_for_metrics = preprocess_logits_for_metrics,
            peft_config = peft_config,
            formatting_func = formatting_func,**kwargs)
        # Re-attach NEFTune manually: the parent hook must be removed first.
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass

    pass
|
unsloth_compiled_cache/UnslothGRPOTrainer.py
ADDED
|
@@ -0,0 +1,1432 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.grpo_trainer import (Any, AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, Dataset, GRPOConfig, GRPOTrainer, GenerationConfig, IterableDataset, Optional, PeftConfig, PreTrainedModel, PreTrainedTokenizerBase, RepeatRandomSampler, RewardFunc, Sampler, SyncRefModelCallback, Trainer, TrainerCallback, Union, apply_chat_template, broadcast_object_list, create_reference_model, defaultdict, gather, gather_object, generate_model_card, get_comet_experiment_url, is_conversational, is_deepspeed_zero3_enabled, is_peft_model, is_wandb_available, maybe_apply_chat_template, nn, os, pad, prepare_deepspeed, set_seed, textwrap, torch, transformers, unwrap_model_for_generation, version, warnings, os, torch, transformers, Any, Union, apply_chat_template, broadcast_object_list, gather, gather_object, is_conversational, maybe_apply_chat_template, nn, os, pad, torch, unwrap_model_for_generation, GRPOTrainer, Trainer, gather, os, torch)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
| 25 |
+
torch_compile_options = {
|
| 26 |
+
"epilogue_fusion" : True,
|
| 27 |
+
"max_autotune" : False,
|
| 28 |
+
"shape_padding" : True,
|
| 29 |
+
"trace.enabled" : False,
|
| 30 |
+
"triton.cudagraphs" : False,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
def selective_log_softmax(logits, index):
    """Return per-token log-probabilities: ``log_softmax(logits)`` gathered at ``index``.

    Computed as ``x_i - logsumexp(x)`` without materializing the full
    log-softmax tensor. Upcasts to float32 for numerical stability.
    Compiled with ``fullgraph=True``; keep the body graph-break free.
    """
    logits = logits.to(torch.float32)
    selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
    # loop to reduce peak mem consumption
    # logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
    logsumexp_values = torch.logsumexp(logits, dim = -1)
    per_token_logps = selected_logits - logsumexp_values  # log_softmax(x_i) = x_i - logsumexp(x)
    return per_token_logps
|
| 42 |
+
|
| 43 |
+
def grpo_compute_loss(old_logits, new_logits, input_ids, mask, beta, advantages):
    # All Unsloth Zoo code licensed under LGPLv3
    """Compute the GRPO objective for one batch.

    Returns ``(loss, completion_length, mean_kl)`` where the loss is a
    per-sequence mean over masked tokens averaged over the batch, and the
    two metrics are computed under ``torch.inference_mode``.
    """
    old_logits = old_logits.to(torch.float32)
    new_logits = new_logits.to(torch.float32)
    gather_idx = input_ids.unsqueeze(-1)

    # Per-token log-probabilities via x_i - logsumexp(x).
    old_logprob = torch.gather(old_logits, dim = -1, index = gather_idx).squeeze(-1) \
        - torch.logsumexp(old_logits, dim = -1)
    new_logprob = torch.gather(new_logits, dim = -1, index = gather_idx).squeeze(-1) \
        - torch.logsumexp(new_logits, dim = -1)

    # Reverse-KL estimator per token: exp(d) - d - 1 with d = old - new.
    delta = old_logprob - new_logprob
    kl_i = torch.exp(delta) - delta - 1.0
    # (Forward KL would instead be: torch.exp(old_logprob) * delta)

    # exp(x - x.detach()) evaluates to 1 but routes gradients through x —
    # the detach is essential for correct gradient propagation.
    policy_i = torch.exp(new_logprob - new_logprob.detach()) * advantages.unsqueeze(1)
    loss_i = beta * kl_i - policy_i

    mask = mask.to(torch.float32)
    tokens_per_reward = mask.sum(1)

    # Mean over each sequence's valid tokens, then over the batch.
    # See https://github.com/huggingface/trl/pull/2881
    loss = ((loss_i * mask).sum(1) / tokens_per_reward).mean()

    # Fold the monitoring metrics in here so callers get them for free.
    with torch.inference_mode():
        completion_length = tokens_per_reward.mean()
        mean_kl = ((kl_i * mask).sum(1) / tokens_per_reward).mean()
    return loss, completion_length, mean_kl
|
| 83 |
+
|
| 84 |
+
class UnslothEfficientGRPO(torch.autograd.Function):
    # All Unsloth Zoo code licensed under LGPLv3
    """Memory-efficient GRPO loss as a custom autograd Function.

    Processes the batch in `n_chunks` slices, computing gradients w.r.t. the
    new hidden states chunk-by-chunk with ``torch.func.grad_and_value`` so the
    full logits tensor is never materialized for the whole batch at once.
    """
    @staticmethod
    def forward(ctx, _new_hidden_states, _old_hidden_states, lm_head, _input_ids, _mask, _advantages, beta, scaler = None, n_chunks = 1):
        # Loss for one chunk; closes over `lm_head` and `beta`.
        def compute_loss(new_hidden_states, old_hidden_states, input_ids, mask, advantages, scaling):
            new_logits = torch.matmul(new_hidden_states, lm_head.t())
            new_logits = new_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
            old_logits = torch.matmul(old_hidden_states, lm_head.t())
            old_logits = old_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
            loss, completion_length, mean_kl = grpo_compute_loss(
                old_logits, new_logits, input_ids, mask, beta, advantages,
            )
            # Scale loss if needed for mixed precision training
            scaled_loss = loss * scaling
            # Must add .loss.detach otherwise autograd uses 2x VRAM
            return scaled_loss, (loss.detach(), completion_length, mean_kl,)
        pass

        device =_new_hidden_states.device
        # Gradient buffer, filled chunk-by-chunk below.
        grad_inputs = torch.empty_like(_new_hidden_states)
        accumulated_loss              = torch.zeros(1, device = device)
        accumulated_completion_length = torch.zeros(1, device = device)
        accumulated_mean_kl           = torch.zeros(1, device = device)

        # Runs one chunk: gradient w.r.t. the new hidden states (argnums=0)
        # plus the auxiliary metrics, accumulated into the buffers above.
        def accumulate_chunk(new_hidden_states_j, old_hidden_states_j, input_ids_j, mask_j, advantages_j, scaling):
            (chunk_grad_input,), (chunk_loss, (unscaled_loss, chunk_completion_length, chunk_mean_kl,)) = torch.func.grad_and_value(
                compute_loss,
                argnums = (0,),
                has_aux = True,
            )(new_hidden_states_j, old_hidden_states_j, input_ids_j, mask_j, advantages_j, scaling)
            accumulated_loss             .add_(unscaled_loss)
            accumulated_completion_length.add_(chunk_completion_length)
            accumulated_mean_kl          .add_(chunk_mean_kl)
            return chunk_grad_input
        pass

        accumulate_chunk = torch.compile(
            accumulate_chunk,
            fullgraph = True,
            options = torch_compile_options,
        )

        grad_inputs_chunks = torch.chunk(grad_inputs, chunks = n_chunks, dim = 0)
        new_hidden_states  = torch.chunk(_new_hidden_states, chunks = n_chunks, dim = 0)
        old_hidden_states  = torch.chunk(_old_hidden_states, chunks = n_chunks, dim = 0)
        input_ids          = torch.chunk(_input_ids, chunks = n_chunks, dim = 0)
        mask               = torch.chunk(_mask, chunks = n_chunks, dim = 0)
        advantages         = torch.chunk(_advantages, chunks = n_chunks, dim = 0)

        # Get mixed precision scaling if seen
        scaling = scaler.get_scale() if scaler is not None else 1.0

        # Force torch.compile to use dynamic shapes for seqlen dim
        mark_dynamic = lambda x: torch._dynamo.mark_dynamic(x, 1)

        for (grad_inputs_j, new_hidden_states_j, old_hidden_states_j, input_ids_j, mask_j, advantages_j,) in \
            zip(grad_inputs_chunks, new_hidden_states, old_hidden_states, input_ids, mask, advantages):

            mark_dynamic(new_hidden_states_j)
            mark_dynamic(old_hidden_states_j)
            mark_dynamic(input_ids_j)
            mark_dynamic(mask_j)

            grad_inputs_j.copy_(
                accumulate_chunk(new_hidden_states_j, old_hidden_states_j, input_ids_j, mask_j, advantages_j, scaling)
            )
        pass

        # Averages over chunks. NOTE(review): this is an unweighted mean, which
        # is exact only when all chunks are the same size — torch.chunk can
        # produce a smaller trailing chunk; presumably callers guarantee
        # divisibility (see grpo_accumulated_loss's factor search) — confirm.
        grad_inputs                  .div_(n_chunks)
        accumulated_loss             .div_(n_chunks)
        accumulated_completion_length.div_(n_chunks)
        accumulated_mean_kl          .div_(n_chunks)
        ctx.save_for_backward(grad_inputs)

        return (
            accumulated_loss,
            accumulated_completion_length,
            accumulated_mean_kl,
        )
    pass

    @staticmethod
    def backward(ctx, grad_output, dcompletion_length, dmean_kl):
        # Gradients were already computed in forward(); just replay them.
        # One None per non-differentiable forward argument.
        (grad_input,) = ctx.saved_tensors
        return (grad_input, None, None, None, None, None, None, None, None,)
    pass
|
| 170 |
+
|
| 171 |
+
def grpo_accumulated_loss(
    trainer,
    input_ids,
    logits_to_keep,
    completion_mask,
    advantages,
    n_chunks = -1,
):
    # All Unsloth Zoo code licensed under LGPLv3
    """Compute the chunked GRPO loss for a trainer step.

    Runs the reference (adapter-disabled) and current model forward passes and
    feeds the hidden states into ``UnslothEfficientGRPO``. Returns
    ``(loss, completion_length, mean_kl)``.
    """
    bsz, qlen = input_ids.shape
    # Find closest multiple: snap n_chunks to a divisor of bsz so torch.chunk
    # produces equal-sized chunks.
    factors = [i for i in range(1, bsz + 1) if bsz % i == 0]
    if n_chunks == -1: n_chunks = bsz
    n_chunks = factors[min(np.searchsorted(factors, n_chunks), len(factors)-1)]

    mixed_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16
    # Ask the Unsloth-patched model to return hidden states instead of logits.
    os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "1"

    completion_input_ids = input_ids[:, -logits_to_keep:]
    lm_head = trainer.model.get_output_embeddings().weight

    with torch.amp.autocast(device_type = "cuda", dtype = mixed_dtype):
        # Reference pass: no gradients, LoRA adapters disabled.
        with torch.inference_mode(), trainer.accelerator.unwrap_model(trainer.model, keep_fp32_wrapper = False).disable_adapter():
            old_hidden_states = trainer.model(input_ids = input_ids, logits_to_keep = logits_to_keep + 1).logits
        pass

        new_hidden_states = trainer.model(input_ids = input_ids, logits_to_keep = logits_to_keep + 1).logits

        loss, completion_length, mean_kl = UnslothEfficientGRPO.apply(
            new_hidden_states, old_hidden_states, lm_head,
            completion_input_ids, completion_mask, advantages, trainer.beta,
            trainer.accelerator.scaler,
            n_chunks,
        )
        return loss, completion_length, mean_kl

        # NOTE(review): everything below is unreachable (after the return) —
        # the old, non-chunked code path kept for reference by the generator.
        # Old non efficient code path
        new_logits = torch.matmul(new_hidden_states, lm_head.t())
        new_logits = new_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
        old_logits = torch.matmul(old_hidden_states, lm_head.t())
        old_logits = old_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
        loss, completion_length, mean_kl = grpo_compute_loss(
            old_logits, new_logits, completion_input_ids, completion_mask, trainer.beta, advantages,
        )
        return loss, completion_length, mean_kl
    pass
|
| 217 |
+
|
| 218 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options)
def grpo_compute_loss_slow(old_logits, new_logits, input_ids, mask, beta, advantages):
    # All Unsloth Zoo code licensed under LGPLv3
    """torch.compile'd variant of ``grpo_compute_loss`` (identical math).

    Compiled with ``fullgraph=True``; keep the body graph-break free.
    Returns ``(loss, completion_length, mean_kl)``.
    """
    old_logits = old_logits.to(torch.float32)
    new_logits = new_logits.to(torch.float32)
    input_ids = input_ids.unsqueeze(-1)

    # Per-token log-probs: x_i - logsumexp(x_i)
    old_x = torch.gather(old_logits, dim = -1, index = input_ids).squeeze(-1)
    new_x = torch.gather(new_logits, dim = -1, index = input_ids).squeeze(-1)
    old = old_x - torch.logsumexp(old_logits, dim = -1)
    new = new_x - torch.logsumexp(new_logits, dim = -1)

    # Reverse KL estimator per token
    kl_i = torch.exp(old - new) - (old - new) - 1.0
    # Full correct reverse KL divergence?? Missing term maybe?
    # kl_i = torch.exp(new) * kl_i

    # Below is forward KL (normal KL)
    # kl_i = torch.exp(old) * (old - new)

    # Must detach - otherwise gradients are not propagated correctly!
    # exp(x - x) == 1, but routes the gradient through `new`
    loss_i = torch.exp(new - new.detach()) * advantages.unsqueeze(1)
    loss_i = -(loss_i - beta * kl_i)

    mask = mask.to(torch.float32)
    n_mask_per_reward = mask.sum(1)

    # Per-sequence mean over valid tokens, then batch mean.
    # See https://github.com/huggingface/trl/pull/2881
    loss_per_reward = (loss_i * mask).sum(1) / n_mask_per_reward
    loss = loss_per_reward.mean()
    # loss = (loss_i * mask).sum() / mask.sum()

    # Get metrics as well which are folded
    with torch.inference_mode():
        completion_length = n_mask_per_reward.mean()
        mean_kl_per_reward = (kl_i * mask).sum(1) / n_mask_per_reward
        mean_kl = mean_kl_per_reward.mean()
    pass
    return loss, completion_length, mean_kl
|
| 259 |
+
|
| 260 |
+
def vLLMSamplingParams(**kwargs):
    """Construct a vLLM ``SamplingParams``, remembering the raw kwargs.

    The kwargs are stashed on the ``_set_kwargs`` attribute so downstream
    code can merge them into the final sampling parameters later.
    """
    from vllm import SamplingParams
    params = SamplingParams(**kwargs)
    params._set_kwargs = kwargs
    return params
|
| 265 |
+
@dataclass
class UnslothGRPOConfig(GRPOConfig):
    """

    Configuration class for the [`GRPOTrainer`].

    Only the parameters specific to GRPO training are listed here. For details on other parameters, refer to the
    [`~transformers.TrainingArguments`] documentation.

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        > Parameters that control the model and reference model

        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model`
            argument of the [`GRPOTrainer`] is provided as a string.

        > Parameters that control the data preprocessing

        remove_unused_columns (`bool`, *optional*, defaults to `False`):
            Whether to only keep the column `"prompt"` in the dataset. If you use a custom reward function that
            requires any column other than `"prompts"` and `"completions"`, you should keep this to `False`.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. If the prompt is longer than this value, it will be truncated left.
        num_generations (`int` or `None`, *optional*, defaults to `8`):
            Number of generations per prompt to sample. The global batch size (num_processes * per_device_batch_size)
            must be divisible by this value.
        temperature (`float`, *optional*, defaults to `0.9`):
            Temperature for sampling. The higher the temperature, the more random the completions.
        max_completion_length (`int` or `None`, *optional*, defaults to `256`):
            Maximum length of the generated completion.
        ds3_gather_for_generation (`bool`, *optional*, defaults to `True`):
            This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation,
            improving generation speed. However, disabling this option allows training models that exceed the VRAM
            capacity of a single GPU, albeit at the cost of slower generation. Disabling this option is not compatible
            with vLLM generation.

        > Parameters that control generation acceleration powered by vLLM

        use_vllm (`bool`, *optional*, defaults to `False`):
            Whether to use vLLM for generating completions. If set to `True`, ensure that a GPU is kept unused for
            training, as vLLM will require one for generation. vLLM must be installed (`pip install vllm`).
        vllm_device (`str`, *optional*, defaults to `"auto"`):
            Device where vLLM generation will run, e.g. `"cuda:1"`. If set to `"auto"` (default), the system will
            automatically select the next available GPU after the last one used for training. This assumes that
            training has not already occupied all available GPUs. If only one device is available, the device will be
            shared between both training and vLLM.
        vllm_gpu_memory_utilization (`float`, *optional*, defaults to `0.9`):
            Ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV cache on the
            device dedicated to generation powered by vLLM. Higher values will increase the KV cache size and thus
            improve the model's throughput. However, if the value is too high, it may cause out-of-memory (OOM) errors
            during initialization.
        vllm_dtype (`str`, *optional*, defaults to `"auto"`):
            Data type to use for vLLM generation. If set to `"auto"`, the data type will be automatically determined
            based on the model configuration. Find the supported values in the vLLM documentation.
        vllm_max_model_len (`int` or `None`, *optional*, defaults to `None`):
            If set, the `max_model_len` to use for vLLM. This could be useful when running with reduced
            `vllm_gpu_memory_utilization`, leading to a reduced KV cache size. If not set, vLLM will use the model
            context size, which might be much larger than the KV cache, leading to inefficiencies.

        > Parameters that control the training

        learning_rate (`float`, *optional*, defaults to `1e-6`):
            Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
            [`~transformers.TrainingArguments`].
            NOTE(review): this Unsloth subclass's `__init__` actually defaults `learning_rate` to `5e-05`,
            not `1e-6` as stated above — confirm which default is intended.
        beta (`float`, *optional*, defaults to `0.04`):
            KL coefficient.
        reward_weights (`list[float]` or `None`, *optional*, defaults to `None`):
            Weights for each reward function. Must match the number of reward functions. If `None`, all rewards are
            weighted equally with weight `1.0`.
        sync_ref_model (`bool`, *optional*, defaults to `False`):
            Whether to synchronize the reference model with the active model every `ref_model_sync_steps` steps, using
            the `ref_model_mixup_alpha` parameter. This synchronization originates from the
            [TR-DPO](https://huggingface.co/papers/2404.09656) paper.
        ref_model_mixup_alpha (`float`, *optional*, defaults to `0.9`):
            α parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which controls the mix
            between the current policy and the previous reference policy during updates. The reference policy is
            updated according to the equation: `π_ref = α * π_θ + (1 - α) * π_ref_prev`. To use this parameter, you
            must set `sync_ref_model=True`.
        ref_model_sync_steps (`int`, *optional*, defaults to `64`):
            τ parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which determines how
            frequently the current policy is synchronized with the reference policy. To use this parameter, you must
            set `sync_ref_model=True`.

        > Parameters that control the logging

        log_completions (`bool`, *optional*, defaults to `False`):
            Whether to log the completions during training.

    """
    # Extra Unsloth-only fields on top of the upstream GRPOConfig.
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    def __init__(
        self,
        output_dir = None,
        overwrite_output_dir = None,
        do_train = False,
        do_eval = False,
        do_predict = False,
        eval_strategy = 'no',
        prediction_loss_only = False,
        per_device_train_batch_size = 4,
        per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None,
        per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2,
        eval_accumulation_steps = 2,
        eval_delay = 0,
        torch_empty_cache_steps = 250,
        learning_rate = 5e-05,
        weight_decay = 0.01,
        adam_beta1 = 0.9,
        adam_beta2 = 0.999,
        adam_epsilon = 1e-08,
        max_grad_norm = 1.0,
        num_train_epochs = 3.0,
        max_steps = -1,
        lr_scheduler_type = 'linear',
        warmup_ratio = 0.1,
        warmup_steps = 0,
        log_level = 'passive',
        log_level_replica = 'warning',
        log_on_each_node = True,
        logging_dir = None,
        logging_strategy = 'steps',
        logging_first_step = False,
        logging_steps = 1,
        logging_nan_inf_filter = False,
        save_strategy = 'steps',
        save_steps = 500,
        save_total_limit = None,
        save_safetensors = True,
        save_on_each_node = False,
        save_only_model = False,
        restore_callback_states_from_checkpoint = False,
        no_cuda = False,
        use_cpu = False,
        use_mps_device = False,
        seed = 3407,
        data_seed = 3407,
        jit_mode_eval = False,
        use_ipex = False,
        bf16 = False,
        fp16 = False,
        fp16_opt_level = 'O1',
        half_precision_backend = 'auto',
        bf16_full_eval = False,
        fp16_full_eval = False,
        tf32 = None,
        local_rank = -1,
        ddp_backend = None,
        tpu_num_cores = None,
        tpu_metrics_debug = False,
        debug = '',
        dataloader_drop_last = False,
        eval_steps = None,
        dataloader_num_workers = 0,
        dataloader_prefetch_factor = None,
        past_index = -1,
        run_name = None,
        disable_tqdm = None,
        remove_unused_columns = False,
        label_names = None,
        load_best_model_at_end = False,
        metric_for_best_model = None,
        greater_is_better = None,
        ignore_data_skip = False,
        fsdp = '',
        fsdp_min_num_params = 0,
        fsdp_config = None,
        tp_size = 0,
        fsdp_transformer_layer_cls_to_wrap = None,
        accelerator_config = None,
        deepspeed = None,
        label_smoothing_factor = 0.0,
        optim = 'adamw_8bit',
        optim_args = None,
        adafactor = False,
        group_by_length = False,
        length_column_name = 'length',
        report_to = None,
        ddp_find_unused_parameters = None,
        ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None,
        dataloader_pin_memory = True,
        dataloader_persistent_workers = False,
        skip_memory_metrics = True,
        use_legacy_prediction_loop = False,
        push_to_hub = False,
        resume_from_checkpoint = None,
        hub_model_id = None,
        hub_strategy = 'every_save',
        hub_token = None,
        hub_private_repo = None,
        hub_always_push = False,
        gradient_checkpointing = False,
        gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False,
        eval_do_concat_batches = True,
        fp16_backend = 'auto',
        push_to_hub_model_id = None,
        push_to_hub_organization = None,
        push_to_hub_token = None,
        mp_parameters = '',
        auto_find_batch_size = False,
        full_determinism = False,
        torchdynamo = None,
        ray_scope = 'last',
        ddp_timeout = 1800,
        torch_compile = False,
        torch_compile_backend = None,
        torch_compile_mode = None,
        include_tokens_per_second = False,
        include_num_input_tokens_seen = False,
        neftune_noise_alpha = None,
        optim_target_modules = None,
        batch_eval_metrics = False,
        eval_on_start = False,
        use_liger_kernel = False,
        eval_use_gather_object = False,
        average_tokens_across_devices = False,
        model_init_kwargs = None,
        max_prompt_length = 512,
        num_generations = 8,
        temperature = 0.9,
        max_completion_length = 256,
        ds3_gather_for_generation = True,
        use_vllm = False,
        vllm_device = 'auto',
        vllm_gpu_memory_utilization = 0.9,
        vllm_dtype = 'auto',
        vllm_max_model_len = None,
        beta = 0.04,
        reward_weights = None,
        sync_ref_model = False,
        ref_model_mixup_alpha = 0.9,
        ref_model_sync_steps = 64,
        log_completions = False,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        **kwargs,
    ):
        # Guard against learning rates that would make gradient updates vanish or explode.
        if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        # If the caller left output_dir/save settings at their defaults, use a local
        # checkpoint folder and disable periodic saving.
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        # GRPO requires the per-device batch size to be a multiple of num_generations;
        # if it is not, fall back to a batch size of exactly num_generations.
        div = per_device_train_batch_size // num_generations
        if div * num_generations != per_device_train_batch_size:
            print('Unsloth: We now expect `per_device_train_batch_size` to be a multiple of `num_generations`.\nWe will change the batch size of ' + str(per_device_train_batch_size) + ' to the `num_generations` of ' + str(num_generations))
            per_device_train_batch_size = num_generations

        # Forward everything (except the two Unsloth-only extras) to the upstream GRPOConfig.
        super().__init__(
            output_dir = output_dir,
            overwrite_output_dir = overwrite_output_dir,
            do_train = do_train,
            do_eval = do_eval,
            do_predict = do_predict,
            eval_strategy = eval_strategy,
            prediction_loss_only = prediction_loss_only,
            per_device_train_batch_size = per_device_train_batch_size,
            per_device_eval_batch_size = per_device_eval_batch_size,
            per_gpu_train_batch_size = per_gpu_train_batch_size,
            per_gpu_eval_batch_size = per_gpu_eval_batch_size,
            gradient_accumulation_steps = gradient_accumulation_steps,
            eval_accumulation_steps = eval_accumulation_steps,
            eval_delay = eval_delay,
            torch_empty_cache_steps = torch_empty_cache_steps,
            learning_rate = learning_rate,
            weight_decay = weight_decay,
            adam_beta1 = adam_beta1,
            adam_beta2 = adam_beta2,
            adam_epsilon = adam_epsilon,
            max_grad_norm = max_grad_norm,
            num_train_epochs = num_train_epochs,
            max_steps = max_steps,
            lr_scheduler_type = lr_scheduler_type,
            warmup_ratio = warmup_ratio,
            warmup_steps = warmup_steps,
            log_level = log_level,
            log_level_replica = log_level_replica,
            log_on_each_node = log_on_each_node,
            logging_dir = logging_dir,
            logging_strategy = logging_strategy,
            logging_first_step = logging_first_step,
            logging_steps = logging_steps,
            logging_nan_inf_filter = logging_nan_inf_filter,
            save_strategy = save_strategy,
            save_steps = save_steps,
            save_total_limit = save_total_limit,
            save_safetensors = save_safetensors,
            save_on_each_node = save_on_each_node,
            save_only_model = save_only_model,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            no_cuda = no_cuda,
            use_cpu = use_cpu,
            use_mps_device = use_mps_device,
            seed = seed,
            data_seed = data_seed,
            jit_mode_eval = jit_mode_eval,
            use_ipex = use_ipex,
            bf16 = bf16,
            fp16 = fp16,
            fp16_opt_level = fp16_opt_level,
            half_precision_backend = half_precision_backend,
            bf16_full_eval = bf16_full_eval,
            fp16_full_eval = fp16_full_eval,
            tf32 = tf32,
            local_rank = local_rank,
            ddp_backend = ddp_backend,
            tpu_num_cores = tpu_num_cores,
            tpu_metrics_debug = tpu_metrics_debug,
            debug = debug,
            dataloader_drop_last = dataloader_drop_last,
            eval_steps = eval_steps,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            past_index = past_index,
            run_name = run_name,
            disable_tqdm = disable_tqdm,
            remove_unused_columns = remove_unused_columns,
            label_names = label_names,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model,
            greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            fsdp = fsdp,
            fsdp_min_num_params = fsdp_min_num_params,
            fsdp_config = fsdp_config,
            tp_size = tp_size,
            fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
            accelerator_config = accelerator_config,
            deepspeed = deepspeed,
            label_smoothing_factor = label_smoothing_factor,
            optim = optim,
            optim_args = optim_args,
            adafactor = adafactor,
            group_by_length = group_by_length,
            length_column_name = length_column_name,
            report_to = report_to,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb,
            ddp_broadcast_buffers = ddp_broadcast_buffers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            skip_memory_metrics = skip_memory_metrics,
            use_legacy_prediction_loop = use_legacy_prediction_loop,
            push_to_hub = push_to_hub,
            resume_from_checkpoint = resume_from_checkpoint,
            hub_model_id = hub_model_id,
            hub_strategy = hub_strategy,
            hub_token = hub_token,
            hub_private_repo = hub_private_repo,
            hub_always_push = hub_always_push,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            include_inputs_for_metrics = include_inputs_for_metrics,
            eval_do_concat_batches = eval_do_concat_batches,
            fp16_backend = fp16_backend,
            push_to_hub_model_id = push_to_hub_model_id,
            push_to_hub_organization = push_to_hub_organization,
            push_to_hub_token = push_to_hub_token,
            mp_parameters = mp_parameters,
            auto_find_batch_size = auto_find_batch_size,
            full_determinism = full_determinism,
            torchdynamo = torchdynamo,
            ray_scope = ray_scope,
            ddp_timeout = ddp_timeout,
            torch_compile = torch_compile,
            torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            include_tokens_per_second = include_tokens_per_second,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            neftune_noise_alpha = neftune_noise_alpha,
            optim_target_modules = optim_target_modules,
            batch_eval_metrics = batch_eval_metrics,
            eval_on_start = eval_on_start,
            use_liger_kernel = use_liger_kernel,
            eval_use_gather_object = eval_use_gather_object,
            average_tokens_across_devices = average_tokens_across_devices,
            model_init_kwargs = model_init_kwargs,
            max_prompt_length = max_prompt_length,
            num_generations = num_generations,
            temperature = temperature,
            max_completion_length = max_completion_length,
            ds3_gather_for_generation = ds3_gather_for_generation,
            use_vllm = use_vllm,
            vllm_device = vllm_device,
            vllm_gpu_memory_utilization = vllm_gpu_memory_utilization,
            vllm_dtype = vllm_dtype,
            vllm_max_model_len = vllm_max_model_len,
            beta = beta,
            reward_weights = reward_weights,
            sync_ref_model = sync_ref_model,
            ref_model_mixup_alpha = ref_model_mixup_alpha,
            ref_model_sync_steps = ref_model_sync_steps,
            log_completions = log_completions,**kwargs)
        # Unsloth-specific extras (not understood by the upstream GRPOConfig).
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
    pass
|
| 674 |
+
|
| 675 |
+
class _UnslothGRPOTrainer(Trainer):
|
| 676 |
+
""""""
|
| 677 |
+
|
| 678 |
+
_tag_names = ["trl", "grpo"]
|
| 679 |
+
|
| 680 |
+
    def __init__(
        self,
        model: Union[str, PreTrainedModel],
        reward_funcs: Union[RewardFunc, list[RewardFunc]],
        args: GRPOConfig = None,
        train_dataset: Optional[Union[Dataset, IterableDataset]] = None,
        eval_dataset: Optional[Union[Dataset, IterableDataset, dict[str, Union[Dataset, IterableDataset]]]] = None,
        processing_class: Optional[PreTrainedTokenizerBase] = None,
        reward_processing_classes: Optional[Union[PreTrainedTokenizerBase, list[PreTrainedTokenizerBase]]] = None,
        callbacks: Optional[list[TrainerCallback]] = None,
        optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]] = (None, None),
        peft_config: Optional["PeftConfig"] = None,
    ):
        """Set up the GRPO trainer: policy model, reference model, reward
        functions/tokenizers, generation backend (vLLM or transformers), and
        batch-size/num_generations validation."""
        # Unsloth models carry their own vLLM engine; force-enable vLLM generation for them.
        if hasattr(model, 'vllm_engine') and hasattr(args, 'use_vllm') and (getattr(args, 'use_vllm', False) == False): args.use_vllm = True
        # Args
        if args is None:
            # Derive a default config name "<model>-GRPO" from the model id.
            model_name = model if isinstance(model, str) else model.config._name_or_path
            model_name = model_name.split("/")[-1]
            args = GRPOConfig(f"{model_name}-GRPO")

        # Models
        # Trained model
        model_init_kwargs = args.model_init_kwargs or {}
        if isinstance(model, str):
            model_id = model
            # Normalize torch_dtype: accept a torch.dtype, "auto", None, or a dtype name string.
            torch_dtype = model_init_kwargs.get("torch_dtype")
            if isinstance(torch_dtype, torch.dtype) or torch_dtype == "auto" or torch_dtype is None:
                pass  # torch_dtype is already a torch.dtype or "auto" or None
            elif isinstance(torch_dtype, str):  # it's a str, but not "auto"
                torch_dtype = getattr(torch, torch_dtype)
                model_init_kwargs["torch_dtype"] = torch_dtype
            else:
                raise ValueError(
                    "Invalid `torch_dtype` passed to `GRPOConfig`. Expected either 'auto' or a string representing "
                    f"a `torch.dtype` (e.g., 'float32'), but got {torch_dtype}."
                )
            # Disable caching if gradient checkpointing is enabled (not supported)
            model_init_kwargs["use_cache"] = (
                False if args.gradient_checkpointing else model_init_kwargs.get("use_cache")
            )
            model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
        else:
            model_id = model.config._name_or_path
            # model_init_kwargs only makes sense when we instantiate the model ourselves.
            if args.model_init_kwargs is not None:
                raise ValueError(
                    "You passed `model_init_kwargs` to the `GRPOConfig`, but your model is already instantiated. "
                    "This argument can only be used when the `model` argument is a string."
                )

        # NOTE(review): dead branch — presumably a placeholder left by the code
        # generator (e.g. where PEFT wrapping would go).
        if False:
            model = model

        # Reference model
        if is_deepspeed_zero3_enabled():
            # Under ZeRO-3 a fresh copy must be loaded rather than deep-copied.
            self.ref_model = AutoModelForCausalLM.from_pretrained(model_id, **model_init_kwargs)
        elif not is_peft_model(model):
            # If PEFT configuration is not provided, create a reference model based on the initial model.
            self.ref_model = create_reference_model(model)
        else:
            # If PEFT is used, the reference model is not needed since the adapter can be disabled
            # to revert to the initial model.
            self.ref_model = None

        # Processing class
        if processing_class is None:
            # Left padding is required for decoder-only generation.
            processing_class = AutoTokenizer.from_pretrained(model.config._name_or_path, padding_side="left")

        # Reward functions: each may be a callable, a model, or a model-id string.
        if not isinstance(reward_funcs, list):
            reward_funcs = [reward_funcs]
        for i, reward_func in enumerate(reward_funcs):
            if isinstance(reward_func, str):
                reward_funcs[i] = AutoModelForSequenceClassification.from_pretrained(
                    reward_func, num_labels=1, **model_init_kwargs
                )
        self.reward_funcs = reward_funcs

        # Reward weights
        if args.reward_weights is not None:
            if len(args.reward_weights) != len(reward_funcs):
                raise ValueError(
                    f"Number of reward weights ({len(args.reward_weights)}) must match number of reward "
                    f"functions ({len(reward_funcs)})"
                )
            self.reward_weights = torch.tensor(args.reward_weights, dtype=torch.float32)
        else:
            self.reward_weights = torch.ones(len(reward_funcs), dtype=torch.float32)

        # Reward processing class: one tokenizer per reward function (None for callables).
        if reward_processing_classes is None:
            reward_processing_classes = [None] * len(reward_funcs)
        elif not isinstance(reward_processing_classes, list):
            reward_processing_classes = [reward_processing_classes]
        else:
            if len(reward_processing_classes) != len(reward_funcs):
                raise ValueError("The number of reward processing classes must match the number of reward functions.")

        for i, (reward_processing_class, reward_func) in enumerate(zip(reward_processing_classes, reward_funcs)):
            if isinstance(reward_func, PreTrainedModel):
                if reward_processing_class is None:
                    reward_processing_class = AutoTokenizer.from_pretrained(reward_func.config._name_or_path)
                if reward_processing_class.pad_token_id is None:
                    reward_processing_class.pad_token = reward_processing_class.eos_token
                # The reward model computes the reward for the latest non-padded token in the input sequence.
                # So it's important to set the pad token ID to the padding token ID of the processing class.
                reward_func.config.pad_token_id = reward_processing_class.pad_token_id
                reward_processing_classes[i] = reward_processing_class
        self.reward_processing_classes = reward_processing_classes

        # Data collator
        def data_collator(features):  # No data collation is needed in GRPO
            return features

        # Training arguments
        self.max_prompt_length = args.max_prompt_length
        self.max_completion_length = args.max_completion_length  # = |o_i| in the GRPO paper
        self.num_generations = args.num_generations  # = G in the GRPO paper
        self.use_vllm = args.use_vllm

        self.beta = args.beta

        # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
        # input tensor associated with the key "input_ids". However, in GRPO, the sampled data does not include the
        # "input_ids" key. Instead, the available keys is "prompt". As a result, the trainer issues the warning:
        # "Could not estimate the number of tokens of the input, floating-point operations will not be computed." To
        # suppress this warning, we set the "estimate_tokens" key in the model's "warnings_issued" dictionary to True.
        # This acts as a flag to indicate that the warning has already been issued.
        model.warnings_issued["estimate_tokens"] = True

        # Initialize the metrics
        self._metrics = defaultdict(list)
        self.log_completions = args.log_completions

        super().__init__(
            model=model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            callbacks=callbacks,
            optimizers=optimizers,
        )

        # Check if the per_device_train/eval_batch_size * num processes can be divided by the number of generations
        num_processes = self.accelerator.num_processes
        global_batch_size = args.per_device_train_batch_size * num_processes
        possible_values = [n_gen for n_gen in range(2, global_batch_size + 1) if (global_batch_size) % n_gen == 0]
        if self.num_generations not in possible_values:
            raise ValueError(
                f"The global train batch size ({num_processes} x {args.per_device_train_batch_size}) must be evenly "
                f"divisible by the number of generations per prompt ({self.num_generations}). Given the current train "
                f"batch size, the valid values for the number of generations are: {possible_values}."
            )
        if self.args.eval_strategy != "no":
            global_batch_size = args.per_device_eval_batch_size * num_processes
            possible_values = [n_gen for n_gen in range(2, global_batch_size + 1) if (global_batch_size) % n_gen == 0]
            if self.num_generations not in possible_values:
                raise ValueError(
                    f"The global eval batch size ({num_processes} x {args.per_device_eval_batch_size}) must be evenly "
                    f"divisible by the number of generations per prompt ({self.num_generations}). Given the current "
                    f"eval batch size, the valid values for the number of generations are: {possible_values}."
                )

        # Ensure each process receives a unique seed to prevent duplicate completions when generating with
        # transformers if num_generations exceeds per_device_train_batch_size. We could skip it if we use vLLM, but
        # it's safer to set it in all cases.
        set_seed(args.seed, device_specific=True)

        if self.use_vllm:
            # Reuse the model's embedded vLLM engine; merge any user-supplied SamplingParams kwargs.
            self.llm = model.vllm_engine; self._last_loaded_step = 0; self.sampling_params = SamplingParams(
                temperature=args.temperature,
                max_tokens=self.max_completion_length,**getattr(getattr(args, 'vllm_sampling_params', vLLMSamplingParams()), '_set_kwargs', {}),)
        else:
            # Plain transformers sampling fallback.
            self.generation_config = GenerationConfig(
                max_new_tokens=self.max_completion_length,
                do_sample=True,
                temperature=args.temperature,
                pad_token_id=processing_class.pad_token_id,
            )

        # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
        # model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
        # self.model_accepts_loss_kwargs to False to enable scaling.
        self.model_accepts_loss_kwargs = False

        # Add tags to the model
        self.model.add_model_tags(self._tag_names)

        if self.ref_model is not None:
            if self.is_deepspeed_enabled:
                self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator)
            else:
                self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)

        if args.sync_ref_model:
            self.add_callback(SyncRefModelCallback(ref_model=self.ref_model, accelerator=self.accelerator))

        # Model-based reward functions run in eval mode on the accelerator.
        for i, reward_func in enumerate(self.reward_funcs):
            if isinstance(reward_func, PreTrainedModel):
                self.reward_funcs[i] = self.accelerator.prepare_model(reward_func, evaluation_mode=True)
|
| 882 |
+
|
| 883 |
+
def _set_signature_columns_if_needed(self):
    """Restrict the dataset columns kept by the parent ``Trainer``.

    When ``self.args.remove_unused_columns`` is True, the parent class prunes
    every dataset column not in ``self._signature_columns`` (normally derived
    from the model's forward signature). GRPO preprocesses its own data, so
    the only column ``training_step`` needs is ``"prompt"``.
    """
    # Respect a value that was already set; only fill in the default.
    if self._signature_columns is not None:
        return
    self._signature_columns = ["prompt"]
def _get_train_sampler(self) -> Sampler:
    """Return the sampler used for training.

    Each prompt is repeated ``num_generations`` times so that identical
    prompts are spread across processes/GPUs; rewards are then normalized
    within each prompt group. Sharing ``args.seed`` across processes keeps
    group membership consistent everywhere.
    """
    sampler = RepeatRandomSampler(
        self.train_dataset, self.num_generations, seed=self.args.seed
    )
    return sampler
def _get_eval_sampler(self, eval_dataset) -> Sampler:
    """Return the sampler used for evaluation.

    Mirrors ``_get_train_sampler``: prompts are repeated ``num_generations``
    times and distributed across processes so per-group reward normalization
    sees every completion of a group; the shared seed keeps the assignment
    deterministic across processes.
    """
    sampler = RepeatRandomSampler(
        eval_dataset, self.num_generations, seed=self.args.seed
    )
    return sampler
# Get the per-token log probabilities for the completions for the model and the reference model
def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep):
    """Return per-token model outputs for the completion tokens, or None.

    Fast path: unless UNSLOTH_USE_NEW_MODEL is set, returns None so the
    caller falls through to Unsloth's fused/accumulated GRPO loss instead of
    materializing per-token values here.
    """
    if os.environ.get('UNSLOTH_USE_NEW_MODEL', '0') == '0':
        return None # Unsloth efficient GRPO
    # Otherwise, calculate normally:
    if not hasattr(self, '_autocast_dtype'):
        # Cache the autocast dtype once, based on the accelerate mixed-precision env var.
        self._autocast_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16
        # NOTE(review): UNSLOTH_FORCE_FLOAT32 forces the autocast dtype to
        # float16 here (not float32) — confirm this is intentional upstream.
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1': self._autocast_dtype = torch.float16
    with torch.amp.autocast(device_type = 'cuda', dtype = self._autocast_dtype):
        # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded
        logits = model(input_ids=input_ids, attention_mask=attention_mask, logits_to_keep=logits_to_keep + 1).logits
        logits = logits[:, :-1, :] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred

        input_ids = input_ids[:, -logits_to_keep:]
        # For transformers<=4.48, logits_to_keep argument isn't supported, so here we drop logits ourselves.
        # See https://github.com/huggingface/trl/issues/2770
        logits = logits[:, -logits_to_keep:]
        # NOTE(review): raw logits are returned, not log-probabilities — the
        # selective_log_softmax step below is deliberately commented out, so
        # downstream code is expected to handle logits directly.
        return logits
        # return selective_log_softmax(logits, input_ids)  #  compute logprobs for the input tokens
    pass
def _move_model_to_vllm(self, *args, **kwargs):
    """Intentional no-op override of TRL's weight-transfer hook.

    Unsloth handles weight synchronization with the vLLM engine through its
    own LoRA-loading path, so this hook does nothing and returns None for
    any arguments.
    """
    return None
def _prepare_inputs(self, inputs: dict[str, Union[torch.Tensor, Any]]) -> dict[str, Union[torch.Tensor, Any]]:
    """Generate completions for a batch of prompts and compute GRPO advantages.

    Tokenizes prompts, generates completions (via vLLM on the main process or
    regular ``generate``), computes reference-model per-token values, scores
    every completion with each reward function, normalizes rewards per prompt
    group into advantages, logs metrics, and returns the tensors that
    ``compute_loss`` consumes.
    """
    device = self.accelerator.device
    prompts = [x["prompt"] for x in inputs]
    prompts_text = [maybe_apply_chat_template(example, self.processing_class)["prompt"] for example in inputs]
    # Left-pad prompts so generation starts flush at the right edge.
    prompt_inputs = self.processing_class(
        prompts_text, return_tensors="pt", padding=True, padding_side="left", add_special_tokens=False
    )
    prompt_inputs = super()._prepare_inputs(prompt_inputs)
    prompt_ids, prompt_mask = prompt_inputs["input_ids"], prompt_inputs["attention_mask"]

    if self.max_prompt_length is not None:
        # Keep only the rightmost tokens (the end of the prompt).
        prompt_ids = prompt_ids[:, -self.max_prompt_length :]
        prompt_mask = prompt_mask[:, -self.max_prompt_length :]

    # Generate completions using either vLLM or regular generation
    if self.args.use_vllm:
        # First, have main process load weights if needed
        if self.state.global_step != self._last_loaded_step:
            self._move_model_to_vllm()
            self._last_loaded_step = self.state.global_step

        # Generate completions using vLLM: gather all prompts and use them in a single call in the main process
        all_prompts_text = gather_object(prompts_text)
        if self.accelerator.is_main_process:
            outputs = self.llm.generate(all_prompts_text, sampling_params=self.sampling_params, use_tqdm=False, lora_request = self.model.load_lora('grpo_trainer_lora_model', load_tensors = True))
            completion_ids = [out.token_ids for completions in outputs for out in completions.outputs]
        else:
            completion_ids = [None] * len(all_prompts_text)
        # Broadcast the completions from the main process to all processes, ensuring each process receives its
        # corresponding slice.
        completion_ids = broadcast_object_list(completion_ids, from_process=0)
        process_slice = slice(
            self.accelerator.process_index * len(prompts),
            (self.accelerator.process_index + 1) * len(prompts),
        )
        completion_ids = completion_ids[process_slice]

        # Pad the completions, and concatenate them with the prompts
        completion_ids = [torch.tensor(ids, device=device) for ids in completion_ids]
        completion_ids = pad(completion_ids, padding_value=self.processing_class.pad_token_id)
        prompt_completion_ids = torch.cat([prompt_ids, completion_ids], dim=1)
    else:
        # Regular generation path
        with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model:
            prompt_completion_ids = unwrapped_model.generate(
                prompt_ids, attention_mask=prompt_mask, generation_config=self.generation_config
            )

        # Compute prompt length and extract completion ids
        prompt_length = prompt_ids.size(1)
        prompt_ids = prompt_completion_ids[:, :prompt_length]
        completion_ids = prompt_completion_ids[:, prompt_length:]

    # Mask everything after the first EOS token
    is_eos = completion_ids == self.processing_class.eos_token_id
    # Default index = sequence length (no EOS found); overwrite rows that have one.
    eos_idx = torch.full((is_eos.size(0),), is_eos.size(1), dtype=torch.long, device=device)
    eos_idx[is_eos.any(dim=1)] = is_eos.int().argmax(dim=1)[is_eos.any(dim=1)]
    sequence_indices = torch.arange(is_eos.size(1), device=device).expand(is_eos.size(0), -1)
    completion_mask = (sequence_indices <= eos_idx.unsqueeze(1)).int()

    # Concatenate prompt_mask with completion_mask for logit computation
    attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)  # (B*G, P+C)

    logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens

    # Reference-model pass under inference mode; the autocast dtype mirrors the
    # ACCELERATE_MIXED_PRECISION / UNSLOTH_FORCE_FLOAT32 logic used elsewhere.
    with torch.inference_mode(), torch.amp.autocast(device_type = 'cuda', dtype = ((torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16) if not torch.is_autocast_enabled('cuda') else nullcontext())if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '0' else torch.float16):
        if self.ref_model is not None:
            ref_per_token_logps = self._get_per_token_logps(
                self.ref_model, prompt_completion_ids, attention_mask, logits_to_keep
            )
        else:
            # No separate ref model: use the policy with its adapter disabled.
            with self.accelerator.unwrap_model(self.model, keep_fp32_wrapper = False).disable_adapter():
                ref_per_token_logps = self._get_per_token_logps(
                    self.model, prompt_completion_ids, attention_mask, logits_to_keep
                )

    # Decode the generated completions
    completions_text = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True)
    if is_conversational(inputs[0]):
        completions = []
        for prompt, completion in zip(prompts, completions_text):
            # If the prompt already ends with an assistant turn, prepend its
            # content so the completion continues that turn.
            bootstrap = prompt.pop()["content"] if prompt[-1]["role"] == "assistant" else ""
            completions.append([{"role": "assistant", "content": bootstrap + completion}])
    else:
        completions = completions_text

    rewards_per_func = torch.zeros(len(prompts), len(self.reward_funcs), device=device)
    for i, (reward_func, reward_processing_class) in enumerate(
        zip(self.reward_funcs, self.reward_processing_classes)
    ):
        if isinstance(reward_func, nn.Module):  # Module instead of PretrainedModel for compat with compiled models
            if is_conversational(inputs[0]):
                messages = [{"messages": p + c} for p, c in zip(prompts, completions)]
                texts = [apply_chat_template(x, reward_processing_class)["text"] for x in messages]
            else:
                texts = [p + c for p, c in zip(prompts, completions)]
            reward_inputs = reward_processing_class(
                texts, return_tensors="pt", padding=True, padding_side="right", add_special_tokens=False
            )
            reward_inputs = super()._prepare_inputs(reward_inputs)
            with torch.inference_mode(), torch.amp.autocast(device_type = 'cuda', dtype = ((torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16) if not torch.is_autocast_enabled('cuda') else nullcontext())if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '0' else torch.float16):
                rewards_per_func[:, i] = reward_func(**reward_inputs).logits[:, 0]  # Shape (B*G,)
        else:
            # Repeat all input columns (but "prompt" and "completion") to match the number of generations
            keys = [key for key in inputs[0] if key not in ["prompt", "completion"]]
            reward_kwargs = {key: [example[key] for example in inputs] for key in keys}
            output_reward_func = reward_func(prompts=prompts, completions=completions, **reward_kwargs)
            rewards_per_func[:, i] = torch.tensor(output_reward_func, dtype=torch.float32, device=device)

    # Gather the reward per function: this part is crucial, because the rewards are normalized per group and the
    # completions may be distributed across processes
    rewards_per_func = gather(rewards_per_func)

    # Apply weights to each reward function's output and sum
    rewards = (rewards_per_func * self.reward_weights.to(device).unsqueeze(0)).sum(dim=1)

    # Compute grouped-wise rewards
    mean_grouped_rewards = rewards.view(-1, self.num_generations).mean(dim=1)
    std_grouped_rewards = rewards.view(-1, self.num_generations).std(dim=1)

    # Normalize the rewards to compute the advantages
    mean_grouped_rewards = mean_grouped_rewards.repeat_interleave(self.num_generations, dim=0)
    std_grouped_rewards = std_grouped_rewards.repeat_interleave(self.num_generations, dim=0)
    # 1e-4 keeps the division stable when all rewards in a group are equal.
    advantages = (rewards - mean_grouped_rewards) / (std_grouped_rewards + 1e-4)

    # Slice to keep only the local part of the data
    process_slice = slice(
        self.accelerator.process_index * len(prompts),
        (self.accelerator.process_index + 1) * len(prompts),
    )
    advantages = advantages[process_slice]

    # Log the metrics
    reward_per_func = rewards_per_func.mean(0)
    for i, reward_func in enumerate(self.reward_funcs):
        if isinstance(reward_func, nn.Module):  # Module instead of PretrainedModel for compat with compiled models
            reward_func_name = reward_func.config._name_or_path.split("/")[-1]
        else:
            reward_func_name = reward_func.__name__
        self._metrics[f"rewards/{reward_func_name}"].append(reward_per_func[i].item())

    self._metrics["reward"].append(rewards.mean().item())
    self._metrics["reward_std"].append(std_grouped_rewards.mean().item())

    if (
        self.log_completions
        and self.state.global_step % self.args.logging_steps == 0
        and "wandb" in self.args.report_to
    ):
        import pandas as pd

        # For logging
        table = {
            "step": [str(self.state.global_step)] * len(rewards),
            "prompt": gather_object(prompts_text),
            "completion": gather_object(completions_text),
            "reward": rewards.tolist(),
        }
        df = pd.DataFrame(table)

        if wandb.run is not None and self.accelerator.is_main_process:
            wandb.log({"completions": wandb.Table(dataframe=df)})

    return {
        "prompt_ids": prompt_ids,
        "prompt_mask": prompt_mask,
        "completion_ids": completion_ids,
        "completion_mask": completion_mask,
        "ref_per_token_logps": ref_per_token_logps,
        "advantages": advantages,
    }
def compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch = None):
    """Compute the GRPO loss for a prepared batch.

    ``inputs`` is the dict produced by ``_prepare_inputs``. Dispatches to the
    slow per-token path when ``_get_per_token_logps`` returned real values,
    otherwise to Unsloth's chunked/accumulated loss. Raises ``ValueError`` if
    ``return_outputs`` is requested, which GRPO does not support.
    """
    if return_outputs:
        raise ValueError("The GRPOTrainer does not support returning outputs")
    # Compute the per-token log probabilities for the model

    prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
    completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"]
    input_ids = torch.cat([prompt_ids, completion_ids], dim=1)
    bsz, qlen = input_ids.shape
    attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)
    # attention_mask = None
    logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens
    # Keep pristine copies before input_ids is sliced below; the accumulated
    # loss path needs the full sequence.
    _input_ids = input_ids
    _logits_to_keep = logits_to_keep

    # None on the fast path (see _get_per_token_logps) — selects the branch below.
    per_token_logps = self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep)

    # Compute the KL divergence between the model and the reference model
    ref_per_token_logps = inputs["ref_per_token_logps"]
    # per_token_kl = torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1

    # x - x.detach() allows for preserving gradients from x
    advantages = inputs["advantages"]
    # per_token_loss = torch.exp(per_token_logps - per_token_logps.detach()) * advantages.unsqueeze(1)
    # per_token_loss = -(per_token_loss - self.beta * per_token_kl)
    # loss = ((per_token_loss * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean()
    input_ids = input_ids[:, -logits_to_keep:]
    if per_token_logps is not None:
        # Reference (slow) implementation over explicit per-token values.
        loss, completion_length, mean_kl = grpo_compute_loss_slow(
            ref_per_token_logps, per_token_logps, input_ids, completion_mask, self.beta, advantages,
        )
    else:
        # Unsloth's memory-efficient chunked loss over the full sequence.
        loss, completion_length, mean_kl = grpo_accumulated_loss(
            self, _input_ids, logits_to_keep, completion_mask, advantages,
            n_chunks = self.args.unsloth_num_chunks,
        )

    # Log the metrics
    # completion_length = self.accelerator.gather_for_metrics(completion_mask.sum(1)).float().mean().item()

    # mean_kl = ((per_token_kl * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean()
    # self._metrics["kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item())

    # Newer TRL nests metrics under "train"/"eval"; older versions use a flat dict.
    if "train" in self._metrics:
        mode = "eval" if self.control.should_evaluate else "train"
        self._metrics[mode]["completion_length"].append(completion_length.item())
        self._metrics[mode]["kl"].append(mean_kl.item())
    else:
        self._metrics["completion_length"].append(completion_length.item())
        self._metrics["kl"].append(mean_kl.item())
    return loss
def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys: Optional[list[str]] = None):
    """Evaluation step: compute the GRPO loss without gradients.

    Returns ``(loss, None, None)`` — GRPO produces no logits/labels for
    prediction, so only the mean detached loss is reported.
    """
    prepared = self._prepare_inputs(inputs)
    with torch.no_grad(), self.compute_loss_context_manager():
        raw_loss = self.compute_loss(model, prepared)
    loss = raw_loss.mean().detach()
    return loss, None, None
def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
    """Merge the accumulated GRPO metrics into ``logs`` and forward them.

    Averages every buffered metric list, prefixes keys with ``"eval_"`` when
    the incoming payload is an evaluation one, forwards the merged dict to the
    parent ``Trainer.log`` (old and new signatures supported), then clears
    the buffers.

    Fixes over the original: an empty metric buffer no longer raises
    ZeroDivisionError, and an empty ``logs`` dict no longer raises
    StopIteration.
    """
    # Average the metrics; skip empty buffers (guards sum(val) / len(val)).
    metrics = {key: sum(val) / len(val) for key, val in self._metrics.items() if val}

    # This method can be called both in training and evaluation. When called in evaluation, the keys in `logs`
    # start with "eval_". We need to add the prefix "eval_" to the keys in `metrics` to match the format.
    if logs and next(iter(logs.keys())).startswith("eval_"):
        metrics = {f"eval_{key}": val for key, val in metrics.items()}

    logs = {**logs, **metrics}
    # transformers 4.47 added the start_time argument to Trainer.log.
    if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
        super().log(logs, start_time)
    else:  # transformers<=4.46
        super().log(logs)
    self._metrics.clear()
def create_model_card(
    self,
    model_name: Optional[str] = None,
    dataset_name: Optional[str] = None,
    tags: Union[str, list[str], None] = None,
):
    """
    Creates a draft of a model card using the information available to the `Trainer`.

    Args:
        model_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the model.
        dataset_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the dataset used for training.
        tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
            Tags to be associated with the model card.
    """
    # Only the main process writes the card file.
    if not self.is_world_process_zero():
        return

    # Use the config's name only when it is a hub model id, not a local directory path.
    if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
        base_model = self.model.config._name_or_path
    else:
        base_model = None

    # Normalize tags to a list.
    tags = tags or []
    if isinstance(tags, str):
        tags = [tags]

    # Mark models fine-tuned with Unsloth.
    if hasattr(self.model.config, "unsloth_version"):
        tags.append("unsloth")

    # BibTeX entry for the GRPO paper (DeepSeekMath).
    citation = textwrap.dedent(
        """\
        @article{zhihong2024deepseekmath,
            title        = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
            author       = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
            year         = 2024,
            eprint       = {arXiv:2402.03300},
        }
        """
    )

    model_card = generate_model_card(
        base_model=base_model,
        model_name=model_name,
        hub_model_id=self.hub_model_id,
        dataset_name=dataset_name,
        tags=tags,
        wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
        comet_url=get_comet_experiment_url(),
        trainer_name="GRPO",
        trainer_citation=citation,
        paper_title="DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models",
        paper_id="2402.03300",
    )

    model_card.save(os.path.join(self.args.output_dir, "README.md"))
class UnslothGRPOTrainer(_UnslothGRPOTrainer):
    """

    Trainer for the Group Relative Policy Optimization (GRPO) method. This algorithm was initially proposed in the
    paper [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).

    Example:

    ```python
    from datasets import load_dataset
    from trl import GRPOTrainer

    dataset = load_dataset("trl-lib/tldr", split="train")

    def reward_func(completions, **kwargs):
        # Dummy reward function that rewards completions with more unique letters.
        return [float(len(set(completion))) for completion in completions]

    trainer = GRPOTrainer(
        model="Qwen/Qwen2-0.5B-Instruct",
        reward_funcs=reward_func,
        train_dataset=dataset,
    )

    trainer.train()
    ```

    Args:
        model (`Union[str, PreTrainedModel]`):
            Model to be trained. Can be either:

            - A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or
              a path to a *directory* containing model weights saved using
              [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is
              loaded using [`~transformers.AutoModelForCausalLM.from_pretrained`] with the keywork arguments
              in `args.model_init_kwargs`.
            - A [`~transformers.PreTrainedModel`] object. Only causal language models are supported.
        reward_funcs (`Union[RewardFunc, list[RewardFunc]]`):
            Reward functions to be used for computing the rewards. To compute the rewards, we call all the reward
            functions with the prompts and completions and sum the rewards. Can be either:

            - A single reward function, such as:
                - A string: The *model ID* of a pretrained model hosted inside a model repo on huggingface.co, or a
                  path to a *directory* containing model weights saved using
                  [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is loaded
                  using [`~transformers.AutoModelForSequenceClassification.from_pretrained`] with `num_labels=1` and the
                  keyword arguments in `args.model_init_kwargs`.
                - A [`~transformers.PreTrainedModel`] object: Only sequence classification models are supported.
                - A custom reward function: The function is provided with the prompts and the generated completions,
                  plus any additional columns in the dataset. It should return a list of rewards. For more details, see
                  [Using a custom reward function](#using-a-custom-reward-function).
            - A list of reward functions, where each item can independently be any of the above types. Mixing different
            types within the list (e.g., a string model ID and a custom reward function) is allowed.
        args ([`GRPOConfig`], *optional*, defaults to `None`):
            Configuration for this trainer. If `None`, a default configuration is used.
        train_dataset ([`~datasets.Dataset`] or [`~datasets.IterableDataset`]):
            Dataset to use for training. It must include a column `"prompt"`. Any additional columns in the dataset is
            ignored. The format of the samples can be either:

            - [Standard](dataset_formats#standard): Each sample contains plain text.
            - [Conversational](dataset_formats#conversational): Each sample contains structured messages (e.g., role
              and content).
        eval_dataset ([`~datasets.Dataset`], [`~datasets.IterableDataset`] or `dict[str, Union[Dataset, IterableDataset]]`):
            Dataset to use for evaluation. It must meet the same requirements as `train_dataset`.
        processing_class ([`~transformers.PreTrainedTokenizerBase`], *optional*, defaults to `None`):
            Processing class used to process the data. The padding side must be set to "left". If `None`, the
            processing class is loaded from the model's name with [`~transformers.AutoTokenizer.from_pretrained`].
        reward_processing_classes (`Union[PreTrainedTokenizerBase, list[PreTrainedTokenizerBase]]`, *optional*, defaults to `None`):
            Processing classes corresponding to the reward functions specified in `reward_funcs`. Can be either:

            - A single processing class: Used when `reward_funcs` contains only one reward function.
            - A list of processing classes: Must match the order and length of the reward functions in `reward_funcs`.
            If set to `None`, or if an element of the list corresponding to a [`~transformers.PreTrainedModel`] is
            `None`, the tokenizer for the model is automatically loaded using [`~transformers.AutoTokenizer.from_pretrained`].
            For elements in `reward_funcs` that are custom reward functions (not [`~transformers.PreTrainedModel`]),
            the corresponding entries in `reward_processing_classes` are ignored.
        callbacks (list of [`~transformers.TrainerCallback`], *optional*, defaults to `None`):
            List of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in [here](https://huggingface.co/docs/transformers/main_classes/callback).

            If you want to remove one of the default callbacks used, use the [`~transformers.Trainer.remove_callback`]
            method.
        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your
            model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
        peft_config ([`~peft.PeftConfig`], *optional*, defaults to `None`):
            PEFT configuration used to wrap the model. If `None`, the model is not wrapped.

    """
    def __init__(
        self,
        model,
        reward_funcs,
        args = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        reward_processing_classes = None,
        callbacks = None,
        peft_config = None,
        **kwargs
    ):
        # Validate / reconcile precision settings before delegating to the
        # generated _UnslothGRPOTrainer.__init__.
        if args is None: args = UnslothGRPOConfig()
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        force_float32 = False
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        # Resolve the model's actual dtype; fall back to the embedding dtype
        # when the config does not declare one.
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        # A float16 model cannot train in bf16 mixed precision and vice versa.
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            # Neither flag set: pick mixed precision matching the model dtype.
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        # Enable step-based evaluation when an eval dataset is given but no strategy was chosen.
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                    '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            # Shrink the (default) eval batch size to the train batch size.
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        # Keep full-eval precision consistent with the training precision.
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        # NOTE(review): generated code — compute_metrics / preprocess_logits_for_metrics
        # are probed via locals() and are never locals of this signature, so
        # _output_logits stays False here; confirm against the generator template.
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        # Propagate the model's max_seq_length to args when args lacks one.
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        # NOTE(review): 'tokenizer' is never a local here, so this branch is a
        # no-op in this generated variant; kept verbatim from the template.
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        # Register per-reward-function metric names with Unsloth's RL statistics patch.
        other_metrics = []
        if not isinstance(reward_funcs, list): _reward_funcs = [reward_funcs]
        else: _reward_funcs = reward_funcs
        for reward_func in _reward_funcs:
            try:
                reward_func_name = reward_func.__name__
                other_metrics.append(f'rewards/{reward_func_name}')
            except: pass

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('grpo_trainer', other_metrics)

        super().__init__(
            model = model,
            reward_funcs = reward_funcs,
            args = args,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            reward_processing_classes = reward_processing_classes,
            callbacks = callbacks,
            peft_config = peft_config,**kwargs)
        # Remove the NEFTune hook installed by the parent; Unsloth applies
        # neftune_noise_alpha directly on the embedding module instead.
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass

pass
unsloth_compiled_cache/UnslothKTOTrainer.py
ADDED
|
@@ -0,0 +1,1834 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.kto_trainer import (Any, AutoModelForCausalLM, BaseImageProcessor, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, KTOConfig, KTOTrainer, Literal, Optional, PartialState, PeftModel, PreTrainedModel, PreTrainedModelWrapper, PreTrainedTokenizerBase, ProcessorMixin, SequentialSampler, Trainer, TrainerCallback, TrainingArguments, Union, _get_kl_dataset, _process_tokens, _tokenize, amp, concatenate_datasets, contextmanager, create_reference_model, deepcopy, defaultdict, disable_dropout_in_model, generate_model_card, get_comet_experiment_url, has_length, inspect, is_comet_available, is_peft_available, is_wandb_available, itemgetter, log_table_to_comet_experiment, maybe_apply_chat_template, maybe_extract_prompt, maybe_unpair_preference_dataset, nn, np, nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_model_for_kbit_training, random, textwrap, torch, tqdm, transformers, version, warnings)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
| 25 |
+
torch_compile_options = {
|
| 26 |
+
"epilogue_fusion" : True,
|
| 27 |
+
"max_autotune" : False,
|
| 28 |
+
"shape_padding" : True,
|
| 29 |
+
"trace.enabled" : False,
|
| 30 |
+
"triton.cudagraphs" : False,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
|
| 34 |
+
def selective_log_softmax(logits, index):
|
| 35 |
+
logits = logits.to(torch.float32)
|
| 36 |
+
selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
|
| 37 |
+
# loop to reduce peak mem consumption
|
| 38 |
+
# logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
|
| 39 |
+
logsumexp_values = torch.logsumexp(logits, dim = -1)
|
| 40 |
+
per_token_logps = selected_logits - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
|
| 41 |
+
return per_token_logps
|
| 42 |
+
@dataclass
|
| 43 |
+
class UnslothKTOConfig(KTOConfig):
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
Configuration class for the [`KTOTrainer`].
|
| 47 |
+
|
| 48 |
+
Using [`~transformers.HfArgumentParser`] we can turn this class into
|
| 49 |
+
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
|
| 50 |
+
command line.
|
| 51 |
+
|
| 52 |
+
Parameters:
|
| 53 |
+
learning_rate (`float`, *optional*, defaults to `5e-7`):
|
| 54 |
+
Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
|
| 55 |
+
[`~transformers.TrainingArguments`].
|
| 56 |
+
max_length (`int` or `None`, *optional*, defaults to `1024`):
|
| 57 |
+
Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
|
| 58 |
+
to use the default data collator.
|
| 59 |
+
max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
|
| 60 |
+
Maximum length of the prompt. This argument is required if you want to use the default data collator.
|
| 61 |
+
max_completion_length (`int` or `None`, *optional*, defaults to `None`):
|
| 62 |
+
Maximum length of the completion. This argument is required if you want to use the default data collator
|
| 63 |
+
and your model is an encoder-decoder.
|
| 64 |
+
beta (`float`, *optional*, defaults to `0.1`):
|
| 65 |
+
Parameter controlling the deviation from the reference model. Higher β means less deviation from the
|
| 66 |
+
reference model.
|
| 67 |
+
loss_type (`str`, *optional*, defaults to `"kto"`):
|
| 68 |
+
Type of loss to use. Possible values are:
|
| 69 |
+
|
| 70 |
+
- `"kto"`: KTO loss from the [KTO](https://huggingface.co/papers/2402.01306) paper.
|
| 71 |
+
- `"apo_zero_unpaired"`: Unpaired variant of APO-zero loss from the [APO](https://huggingface.co/papers/2408.06266) paper.
|
| 72 |
+
|
| 73 |
+
desirable_weight (`float`, *optional*, defaults to `1.0`):
|
| 74 |
+
Desirable losses are weighed by this factor to counter unequal number of desirable and undesirable paris.
|
| 75 |
+
undesirable_weight (`float`, *optional*, defaults to `1.0`):
|
| 76 |
+
Undesirable losses are weighed by this factor to counter unequal number of desirable and undesirable pairs.
|
| 77 |
+
label_pad_token_id (`int`, *optional*, defaults to `-100`):
|
| 78 |
+
Label pad token id. This argument is required if you want to use the default data collator.
|
| 79 |
+
padding_value (`int` or `None`, *optional*, defaults to `None`):
|
| 80 |
+
Padding value to use. If `None`, the padding value of the tokenizer is used.
|
| 81 |
+
truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
|
| 82 |
+
Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
|
| 83 |
+
This argument is required if you want to use the default data collator.
|
| 84 |
+
generate_during_eval (`bool`, *optional*, defaults to `False`):
|
| 85 |
+
If `True`, generates and logs completions from both the model and the reference model to W&B or Comet during
|
| 86 |
+
evaluation.
|
| 87 |
+
is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
|
| 88 |
+
When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
|
| 89 |
+
you need to specify if the model returned by the callable is an encoder-decoder model.
|
| 90 |
+
precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
|
| 91 |
+
Whether to precompute reference model log probabilities for training and evaluation datasets. This is
|
| 92 |
+
useful when training without the reference model to reduce the total GPU memory needed.
|
| 93 |
+
model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
|
| 94 |
+
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
|
| 95 |
+
string.
|
| 96 |
+
ref_model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
|
| 97 |
+
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
|
| 98 |
+
from a string.
|
| 99 |
+
dataset_num_proc: (`int` or `None`, *optional*, defaults to `None`):
|
| 100 |
+
Number of processes to use for processing the dataset.
|
| 101 |
+
disable_dropout (`bool`, *optional*, defaults to `True`):
|
| 102 |
+
Whether to disable dropout in the model and reference model.
|
| 103 |
+
|
| 104 |
+
"""
|
| 105 |
+
vllm_sampling_params: Optional[Any] = field(
|
| 106 |
+
default = None,
|
| 107 |
+
metadata = {'help': 'vLLM SamplingParams'},
|
| 108 |
+
)
|
| 109 |
+
unsloth_num_chunks : Optional[int] = field(
|
| 110 |
+
default = -1,
|
| 111 |
+
metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
|
| 112 |
+
)
|
| 113 |
+
def __init__(
|
| 114 |
+
self,
|
| 115 |
+
output_dir = None,
|
| 116 |
+
overwrite_output_dir = None,
|
| 117 |
+
do_train = False,
|
| 118 |
+
do_eval = False,
|
| 119 |
+
do_predict = False,
|
| 120 |
+
eval_strategy = 'no',
|
| 121 |
+
prediction_loss_only = False,
|
| 122 |
+
per_device_train_batch_size = 4,
|
| 123 |
+
per_device_eval_batch_size = 4,
|
| 124 |
+
per_gpu_train_batch_size = None,
|
| 125 |
+
per_gpu_eval_batch_size = None,
|
| 126 |
+
gradient_accumulation_steps = 2,
|
| 127 |
+
eval_accumulation_steps = 2,
|
| 128 |
+
eval_delay = 0,
|
| 129 |
+
torch_empty_cache_steps = 250,
|
| 130 |
+
learning_rate = 5e-05,
|
| 131 |
+
weight_decay = 0.01,
|
| 132 |
+
adam_beta1 = 0.9,
|
| 133 |
+
adam_beta2 = 0.999,
|
| 134 |
+
adam_epsilon = 1e-08,
|
| 135 |
+
max_grad_norm = 1.0,
|
| 136 |
+
num_train_epochs = 3.0,
|
| 137 |
+
max_steps = -1,
|
| 138 |
+
lr_scheduler_type = 'linear',
|
| 139 |
+
warmup_ratio = 0.1,
|
| 140 |
+
warmup_steps = 0,
|
| 141 |
+
log_level = 'passive',
|
| 142 |
+
log_level_replica = 'warning',
|
| 143 |
+
log_on_each_node = True,
|
| 144 |
+
logging_dir = None,
|
| 145 |
+
logging_strategy = 'steps',
|
| 146 |
+
logging_first_step = False,
|
| 147 |
+
logging_steps = 1,
|
| 148 |
+
logging_nan_inf_filter = False,
|
| 149 |
+
save_strategy = 'steps',
|
| 150 |
+
save_steps = 500,
|
| 151 |
+
save_total_limit = None,
|
| 152 |
+
save_safetensors = True,
|
| 153 |
+
save_on_each_node = False,
|
| 154 |
+
save_only_model = False,
|
| 155 |
+
restore_callback_states_from_checkpoint = False,
|
| 156 |
+
no_cuda = False,
|
| 157 |
+
use_cpu = False,
|
| 158 |
+
use_mps_device = False,
|
| 159 |
+
seed = 3407,
|
| 160 |
+
data_seed = 3407,
|
| 161 |
+
jit_mode_eval = False,
|
| 162 |
+
use_ipex = False,
|
| 163 |
+
bf16 = False,
|
| 164 |
+
fp16 = False,
|
| 165 |
+
fp16_opt_level = 'O1',
|
| 166 |
+
half_precision_backend = 'auto',
|
| 167 |
+
bf16_full_eval = False,
|
| 168 |
+
fp16_full_eval = False,
|
| 169 |
+
tf32 = None,
|
| 170 |
+
local_rank = -1,
|
| 171 |
+
ddp_backend = None,
|
| 172 |
+
tpu_num_cores = None,
|
| 173 |
+
tpu_metrics_debug = False,
|
| 174 |
+
debug = '',
|
| 175 |
+
dataloader_drop_last = False,
|
| 176 |
+
eval_steps = None,
|
| 177 |
+
dataloader_num_workers = 0,
|
| 178 |
+
dataloader_prefetch_factor = None,
|
| 179 |
+
past_index = -1,
|
| 180 |
+
run_name = None,
|
| 181 |
+
disable_tqdm = None,
|
| 182 |
+
remove_unused_columns = True,
|
| 183 |
+
label_names = None,
|
| 184 |
+
load_best_model_at_end = False,
|
| 185 |
+
metric_for_best_model = None,
|
| 186 |
+
greater_is_better = None,
|
| 187 |
+
ignore_data_skip = False,
|
| 188 |
+
fsdp = '',
|
| 189 |
+
fsdp_min_num_params = 0,
|
| 190 |
+
fsdp_config = None,
|
| 191 |
+
tp_size = 0,
|
| 192 |
+
fsdp_transformer_layer_cls_to_wrap = None,
|
| 193 |
+
accelerator_config = None,
|
| 194 |
+
deepspeed = None,
|
| 195 |
+
label_smoothing_factor = 0.0,
|
| 196 |
+
optim = 'adamw_8bit',
|
| 197 |
+
optim_args = None,
|
| 198 |
+
adafactor = False,
|
| 199 |
+
group_by_length = False,
|
| 200 |
+
length_column_name = 'length',
|
| 201 |
+
report_to = None,
|
| 202 |
+
ddp_find_unused_parameters = None,
|
| 203 |
+
ddp_bucket_cap_mb = None,
|
| 204 |
+
ddp_broadcast_buffers = None,
|
| 205 |
+
dataloader_pin_memory = True,
|
| 206 |
+
dataloader_persistent_workers = False,
|
| 207 |
+
skip_memory_metrics = True,
|
| 208 |
+
use_legacy_prediction_loop = False,
|
| 209 |
+
push_to_hub = False,
|
| 210 |
+
resume_from_checkpoint = None,
|
| 211 |
+
hub_model_id = None,
|
| 212 |
+
hub_strategy = 'every_save',
|
| 213 |
+
hub_token = None,
|
| 214 |
+
hub_private_repo = None,
|
| 215 |
+
hub_always_push = False,
|
| 216 |
+
gradient_checkpointing = False,
|
| 217 |
+
gradient_checkpointing_kwargs = None,
|
| 218 |
+
include_inputs_for_metrics = False,
|
| 219 |
+
eval_do_concat_batches = True,
|
| 220 |
+
fp16_backend = 'auto',
|
| 221 |
+
push_to_hub_model_id = None,
|
| 222 |
+
push_to_hub_organization = None,
|
| 223 |
+
push_to_hub_token = None,
|
| 224 |
+
mp_parameters = '',
|
| 225 |
+
auto_find_batch_size = False,
|
| 226 |
+
full_determinism = False,
|
| 227 |
+
torchdynamo = None,
|
| 228 |
+
ray_scope = 'last',
|
| 229 |
+
ddp_timeout = 1800,
|
| 230 |
+
torch_compile = False,
|
| 231 |
+
torch_compile_backend = None,
|
| 232 |
+
torch_compile_mode = None,
|
| 233 |
+
include_tokens_per_second = False,
|
| 234 |
+
include_num_input_tokens_seen = False,
|
| 235 |
+
neftune_noise_alpha = None,
|
| 236 |
+
optim_target_modules = None,
|
| 237 |
+
batch_eval_metrics = False,
|
| 238 |
+
eval_on_start = False,
|
| 239 |
+
use_liger_kernel = False,
|
| 240 |
+
eval_use_gather_object = False,
|
| 241 |
+
average_tokens_across_devices = False,
|
| 242 |
+
max_length = 1024,
|
| 243 |
+
max_prompt_length = 512,
|
| 244 |
+
max_completion_length = None,
|
| 245 |
+
beta = 0.1,
|
| 246 |
+
loss_type = 'kto',
|
| 247 |
+
desirable_weight = 1.0,
|
| 248 |
+
undesirable_weight = 1.0,
|
| 249 |
+
label_pad_token_id = -100,
|
| 250 |
+
padding_value = None,
|
| 251 |
+
truncation_mode = 'keep_end',
|
| 252 |
+
generate_during_eval = False,
|
| 253 |
+
is_encoder_decoder = None,
|
| 254 |
+
disable_dropout = True,
|
| 255 |
+
precompute_ref_log_probs = False,
|
| 256 |
+
model_init_kwargs = None,
|
| 257 |
+
ref_model_init_kwargs = None,
|
| 258 |
+
dataset_num_proc = None,
|
| 259 |
+
vllm_sampling_params = None,
|
| 260 |
+
unsloth_num_chunks = -1,
|
| 261 |
+
**kwargs,
|
| 262 |
+
):
|
| 263 |
+
if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
|
| 264 |
+
if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
|
| 265 |
+
if output_dir is None and save_strategy == 'steps' and save_steps == 500:
|
| 266 |
+
output_dir = 'unsloth_training_checkpoints'
|
| 267 |
+
save_strategy = 'no'
|
| 268 |
+
if dataset_num_proc is None:
|
| 269 |
+
from multiprocessing import cpu_count
|
| 270 |
+
dataset_num_proc = cpu_count()
|
| 271 |
+
|
| 272 |
+
super().__init__(
|
| 273 |
+
output_dir = output_dir,
|
| 274 |
+
overwrite_output_dir = overwrite_output_dir,
|
| 275 |
+
do_train = do_train,
|
| 276 |
+
do_eval = do_eval,
|
| 277 |
+
do_predict = do_predict,
|
| 278 |
+
eval_strategy = eval_strategy,
|
| 279 |
+
prediction_loss_only = prediction_loss_only,
|
| 280 |
+
per_device_train_batch_size = per_device_train_batch_size,
|
| 281 |
+
per_device_eval_batch_size = per_device_eval_batch_size,
|
| 282 |
+
per_gpu_train_batch_size = per_gpu_train_batch_size,
|
| 283 |
+
per_gpu_eval_batch_size = per_gpu_eval_batch_size,
|
| 284 |
+
gradient_accumulation_steps = gradient_accumulation_steps,
|
| 285 |
+
eval_accumulation_steps = eval_accumulation_steps,
|
| 286 |
+
eval_delay = eval_delay,
|
| 287 |
+
torch_empty_cache_steps = torch_empty_cache_steps,
|
| 288 |
+
learning_rate = learning_rate,
|
| 289 |
+
weight_decay = weight_decay,
|
| 290 |
+
adam_beta1 = adam_beta1,
|
| 291 |
+
adam_beta2 = adam_beta2,
|
| 292 |
+
adam_epsilon = adam_epsilon,
|
| 293 |
+
max_grad_norm = max_grad_norm,
|
| 294 |
+
num_train_epochs = num_train_epochs,
|
| 295 |
+
max_steps = max_steps,
|
| 296 |
+
lr_scheduler_type = lr_scheduler_type,
|
| 297 |
+
warmup_ratio = warmup_ratio,
|
| 298 |
+
warmup_steps = warmup_steps,
|
| 299 |
+
log_level = log_level,
|
| 300 |
+
log_level_replica = log_level_replica,
|
| 301 |
+
log_on_each_node = log_on_each_node,
|
| 302 |
+
logging_dir = logging_dir,
|
| 303 |
+
logging_strategy = logging_strategy,
|
| 304 |
+
logging_first_step = logging_first_step,
|
| 305 |
+
logging_steps = logging_steps,
|
| 306 |
+
logging_nan_inf_filter = logging_nan_inf_filter,
|
| 307 |
+
save_strategy = save_strategy,
|
| 308 |
+
save_steps = save_steps,
|
| 309 |
+
save_total_limit = save_total_limit,
|
| 310 |
+
save_safetensors = save_safetensors,
|
| 311 |
+
save_on_each_node = save_on_each_node,
|
| 312 |
+
save_only_model = save_only_model,
|
| 313 |
+
restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
|
| 314 |
+
no_cuda = no_cuda,
|
| 315 |
+
use_cpu = use_cpu,
|
| 316 |
+
use_mps_device = use_mps_device,
|
| 317 |
+
seed = seed,
|
| 318 |
+
data_seed = data_seed,
|
| 319 |
+
jit_mode_eval = jit_mode_eval,
|
| 320 |
+
use_ipex = use_ipex,
|
| 321 |
+
bf16 = bf16,
|
| 322 |
+
fp16 = fp16,
|
| 323 |
+
fp16_opt_level = fp16_opt_level,
|
| 324 |
+
half_precision_backend = half_precision_backend,
|
| 325 |
+
bf16_full_eval = bf16_full_eval,
|
| 326 |
+
fp16_full_eval = fp16_full_eval,
|
| 327 |
+
tf32 = tf32,
|
| 328 |
+
local_rank = local_rank,
|
| 329 |
+
ddp_backend = ddp_backend,
|
| 330 |
+
tpu_num_cores = tpu_num_cores,
|
| 331 |
+
tpu_metrics_debug = tpu_metrics_debug,
|
| 332 |
+
debug = debug,
|
| 333 |
+
dataloader_drop_last = dataloader_drop_last,
|
| 334 |
+
eval_steps = eval_steps,
|
| 335 |
+
dataloader_num_workers = dataloader_num_workers,
|
| 336 |
+
dataloader_prefetch_factor = dataloader_prefetch_factor,
|
| 337 |
+
past_index = past_index,
|
| 338 |
+
run_name = run_name,
|
| 339 |
+
disable_tqdm = disable_tqdm,
|
| 340 |
+
remove_unused_columns = remove_unused_columns,
|
| 341 |
+
label_names = label_names,
|
| 342 |
+
load_best_model_at_end = load_best_model_at_end,
|
| 343 |
+
metric_for_best_model = metric_for_best_model,
|
| 344 |
+
greater_is_better = greater_is_better,
|
| 345 |
+
ignore_data_skip = ignore_data_skip,
|
| 346 |
+
fsdp = fsdp,
|
| 347 |
+
fsdp_min_num_params = fsdp_min_num_params,
|
| 348 |
+
fsdp_config = fsdp_config,
|
| 349 |
+
tp_size = tp_size,
|
| 350 |
+
fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
|
| 351 |
+
accelerator_config = accelerator_config,
|
| 352 |
+
deepspeed = deepspeed,
|
| 353 |
+
label_smoothing_factor = label_smoothing_factor,
|
| 354 |
+
optim = optim,
|
| 355 |
+
optim_args = optim_args,
|
| 356 |
+
adafactor = adafactor,
|
| 357 |
+
group_by_length = group_by_length,
|
| 358 |
+
length_column_name = length_column_name,
|
| 359 |
+
report_to = report_to,
|
| 360 |
+
ddp_find_unused_parameters = ddp_find_unused_parameters,
|
| 361 |
+
ddp_bucket_cap_mb = ddp_bucket_cap_mb,
|
| 362 |
+
ddp_broadcast_buffers = ddp_broadcast_buffers,
|
| 363 |
+
dataloader_pin_memory = dataloader_pin_memory,
|
| 364 |
+
dataloader_persistent_workers = dataloader_persistent_workers,
|
| 365 |
+
skip_memory_metrics = skip_memory_metrics,
|
| 366 |
+
use_legacy_prediction_loop = use_legacy_prediction_loop,
|
| 367 |
+
push_to_hub = push_to_hub,
|
| 368 |
+
resume_from_checkpoint = resume_from_checkpoint,
|
| 369 |
+
hub_model_id = hub_model_id,
|
| 370 |
+
hub_strategy = hub_strategy,
|
| 371 |
+
hub_token = hub_token,
|
| 372 |
+
hub_private_repo = hub_private_repo,
|
| 373 |
+
hub_always_push = hub_always_push,
|
| 374 |
+
gradient_checkpointing = gradient_checkpointing,
|
| 375 |
+
gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
|
| 376 |
+
include_inputs_for_metrics = include_inputs_for_metrics,
|
| 377 |
+
eval_do_concat_batches = eval_do_concat_batches,
|
| 378 |
+
fp16_backend = fp16_backend,
|
| 379 |
+
push_to_hub_model_id = push_to_hub_model_id,
|
| 380 |
+
push_to_hub_organization = push_to_hub_organization,
|
| 381 |
+
push_to_hub_token = push_to_hub_token,
|
| 382 |
+
mp_parameters = mp_parameters,
|
| 383 |
+
auto_find_batch_size = auto_find_batch_size,
|
| 384 |
+
full_determinism = full_determinism,
|
| 385 |
+
torchdynamo = torchdynamo,
|
| 386 |
+
ray_scope = ray_scope,
|
| 387 |
+
ddp_timeout = ddp_timeout,
|
| 388 |
+
torch_compile = torch_compile,
|
| 389 |
+
torch_compile_backend = torch_compile_backend,
|
| 390 |
+
torch_compile_mode = torch_compile_mode,
|
| 391 |
+
include_tokens_per_second = include_tokens_per_second,
|
| 392 |
+
include_num_input_tokens_seen = include_num_input_tokens_seen,
|
| 393 |
+
neftune_noise_alpha = neftune_noise_alpha,
|
| 394 |
+
optim_target_modules = optim_target_modules,
|
| 395 |
+
batch_eval_metrics = batch_eval_metrics,
|
| 396 |
+
eval_on_start = eval_on_start,
|
| 397 |
+
use_liger_kernel = use_liger_kernel,
|
| 398 |
+
eval_use_gather_object = eval_use_gather_object,
|
| 399 |
+
average_tokens_across_devices = average_tokens_across_devices,
|
| 400 |
+
max_length = max_length,
|
| 401 |
+
max_prompt_length = max_prompt_length,
|
| 402 |
+
max_completion_length = max_completion_length,
|
| 403 |
+
beta = beta,
|
| 404 |
+
loss_type = loss_type,
|
| 405 |
+
desirable_weight = desirable_weight,
|
| 406 |
+
undesirable_weight = undesirable_weight,
|
| 407 |
+
label_pad_token_id = label_pad_token_id,
|
| 408 |
+
padding_value = padding_value,
|
| 409 |
+
truncation_mode = truncation_mode,
|
| 410 |
+
generate_during_eval = generate_during_eval,
|
| 411 |
+
is_encoder_decoder = is_encoder_decoder,
|
| 412 |
+
disable_dropout = disable_dropout,
|
| 413 |
+
precompute_ref_log_probs = precompute_ref_log_probs,
|
| 414 |
+
model_init_kwargs = model_init_kwargs,
|
| 415 |
+
ref_model_init_kwargs = ref_model_init_kwargs,
|
| 416 |
+
dataset_num_proc = dataset_num_proc,**kwargs)
|
| 417 |
+
self.vllm_sampling_params = vllm_sampling_params
|
| 418 |
+
self.unsloth_num_chunks = unsloth_num_chunks
|
| 419 |
+
pass
|
| 420 |
+
|
| 421 |
+
class _UnslothKTOTrainer(Trainer):
|
| 422 |
+
r""""""
|
| 423 |
+
|
| 424 |
+
_tag_names = ["trl", "kto"]
|
| 425 |
+
|
| 426 |
+
def __init__(
|
| 427 |
+
self,
|
| 428 |
+
model: Union[PreTrainedModel, nn.Module, str] = None,
|
| 429 |
+
ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
|
| 430 |
+
args: KTOConfig = None,
|
| 431 |
+
train_dataset: Optional[Dataset] = None,
|
| 432 |
+
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
|
| 433 |
+
processing_class: Optional[
|
| 434 |
+
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
|
| 435 |
+
] = None,
|
| 436 |
+
data_collator: Optional[DataCollator] = None,
|
| 437 |
+
model_init: Optional[Callable[[], PreTrainedModel]] = None,
|
| 438 |
+
callbacks: Optional[list[TrainerCallback]] = None,
|
| 439 |
+
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
|
| 440 |
+
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
|
| 441 |
+
peft_config: Optional[dict] = None,
|
| 442 |
+
compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
|
| 443 |
+
model_adapter_name: Optional[str] = None,
|
| 444 |
+
ref_adapter_name: Optional[str] = None,
|
| 445 |
+
):
|
| 446 |
+
if type(args) is TrainingArguments:
|
| 447 |
+
raise ValueError("Please use `KTOConfig` instead TrainingArguments.")
|
| 448 |
+
|
| 449 |
+
if not isinstance(model, str) and ref_model is model:
|
| 450 |
+
raise ValueError(
|
| 451 |
+
"`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
|
| 452 |
+
"same as `model`, you must mass a copy of it, or `None` if you use peft."
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
if args.model_init_kwargs is None:
|
| 456 |
+
model_init_kwargs = {}
|
| 457 |
+
elif not isinstance(model, str):
|
| 458 |
+
raise ValueError("You passed model_kwargs to the KTOTrainer. But your model is already instantiated.")
|
| 459 |
+
else:
|
| 460 |
+
model_init_kwargs = args.model_init_kwargs
|
| 461 |
+
torch_dtype = model_init_kwargs.get("torch_dtype")
|
| 462 |
+
if torch_dtype is not None:
|
| 463 |
+
# Convert to `torch.dtype` if an str is passed
|
| 464 |
+
if isinstance(torch_dtype, str) and torch_dtype != "auto":
|
| 465 |
+
torch_dtype = getattr(torch, torch_dtype)
|
| 466 |
+
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
|
| 467 |
+
raise ValueError(
|
| 468 |
+
f"Invalid `torch_dtype` passed to the KTOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
|
| 469 |
+
)
|
| 470 |
+
model_init_kwargs["torch_dtype"] = torch_dtype
|
| 471 |
+
|
| 472 |
+
if args.ref_model_init_kwargs is None:
|
| 473 |
+
ref_model_init_kwargs = {}
|
| 474 |
+
elif not isinstance(ref_model, str):
|
| 475 |
+
raise ValueError(
|
| 476 |
+
"You passed ref_model_kwargs to the KTOTrainer. But your ref_model is already instantiated."
|
| 477 |
+
)
|
| 478 |
+
else:
|
| 479 |
+
ref_model_init_kwargs = args.ref_model_init_kwargs
|
| 480 |
+
torch_dtype = ref_model_init_kwargs.get("torch_dtype")
|
| 481 |
+
if torch_dtype is not None:
|
| 482 |
+
# Convert to `torch.dtype` if an str is passed
|
| 483 |
+
if isinstance(torch_dtype, str) and torch_dtype != "auto":
|
| 484 |
+
torch_dtype = getattr(torch, torch_dtype)
|
| 485 |
+
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
|
| 486 |
+
raise ValueError(
|
| 487 |
+
f"Invalid `torch_dtype` passed to the KTOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
|
| 488 |
+
)
|
| 489 |
+
ref_model_init_kwargs["torch_dtype"] = torch_dtype
|
| 490 |
+
|
| 491 |
+
if isinstance(model, str):
|
| 492 |
+
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
|
| 493 |
+
|
| 494 |
+
if isinstance(ref_model, str):
|
| 495 |
+
ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)
|
| 496 |
+
|
| 497 |
+
# Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
|
| 498 |
+
# has been called in order to properly call autocast if needed.
|
| 499 |
+
self._peft_has_been_casted_to_bf16 = False
|
| 500 |
+
|
| 501 |
+
if not is_peft_available() and peft_config is not None:
|
| 502 |
+
raise ValueError(
|
| 503 |
+
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it with `pip install peft` to use the PEFT models"
|
| 504 |
+
)
|
| 505 |
+
elif is_peft_available() and peft_config is not None:
|
| 506 |
+
# if model is a peft model and we have a peft_config, we merge and unload it first
|
| 507 |
+
if isinstance(model, PeftModel):
|
| 508 |
+
model = model.merge_and_unload()
|
| 509 |
+
|
| 510 |
+
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
|
| 511 |
+
_support_gc_kwargs = hasattr(
|
| 512 |
+
args, "gradient_checkpointing_kwargs"
|
| 513 |
+
) and "gradient_checkpointing_kwargs" in list(
|
| 514 |
+
inspect.signature(prepare_model_for_kbit_training).parameters
|
| 515 |
+
)
|
| 516 |
+
|
| 517 |
+
prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
|
| 518 |
+
|
| 519 |
+
if _support_gc_kwargs:
|
| 520 |
+
prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
|
| 521 |
+
|
| 522 |
+
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
|
| 523 |
+
elif getattr(args, "gradient_checkpointing", False):
|
| 524 |
+
# For backward compatibility with older versions of transformers
|
| 525 |
+
if hasattr(model, "enable_input_require_grads"):
|
| 526 |
+
model.enable_input_require_grads()
|
| 527 |
+
else:
|
| 528 |
+
|
| 529 |
+
def make_inputs_require_grad(module, input, output):
|
| 530 |
+
output.requires_grad_(True)
|
| 531 |
+
|
| 532 |
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
| 533 |
+
|
| 534 |
+
# get peft model with the given config
|
| 535 |
+
model = model
|
| 536 |
+
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
|
| 537 |
+
peft_module_casting_to_bf16(model)
|
| 538 |
+
# If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
|
| 539 |
+
self._peft_has_been_casted_to_bf16 = True
|
| 540 |
+
|
| 541 |
+
# For models that use gradient_checkpointing, we need to attach a hook that enables input
|
| 542 |
+
# to explicitly have `requires_grad=True`, otherwise training will either silently
|
| 543 |
+
# fail or completely fail.
|
| 544 |
+
elif getattr(args, "gradient_checkpointing", False):
|
| 545 |
+
# For backward compatibility with older versions of transformers
|
| 546 |
+
if hasattr(model, "enable_input_require_grads"):
|
| 547 |
+
model.enable_input_require_grads()
|
| 548 |
+
else:
|
| 549 |
+
|
| 550 |
+
def make_inputs_require_grad(module, input, output):
|
| 551 |
+
output.requires_grad_(True)
|
| 552 |
+
|
| 553 |
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
| 554 |
+
|
| 555 |
+
if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
|
| 556 |
+
raise ValueError(
|
| 557 |
+
"`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
|
| 558 |
+
" Please install `wandb` or `comet-ml` to resolve."
|
| 559 |
+
)
|
| 560 |
+
|
| 561 |
+
if model is not None:
|
| 562 |
+
self.is_encoder_decoder = model.config.is_encoder_decoder
|
| 563 |
+
elif args.is_encoder_decoder is None:
|
| 564 |
+
raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
|
| 565 |
+
else:
|
| 566 |
+
self.is_encoder_decoder = args.is_encoder_decoder
|
| 567 |
+
|
| 568 |
+
self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
|
| 569 |
+
self.model_adapter_name = model_adapter_name
|
| 570 |
+
self.ref_adapter_name = ref_adapter_name
|
| 571 |
+
|
| 572 |
+
if ref_model:
|
| 573 |
+
self.ref_model = ref_model
|
| 574 |
+
elif self.is_peft_model or args.precompute_ref_log_probs:
|
| 575 |
+
# The `model` with adapters turned off will be used as the reference model
|
| 576 |
+
self.ref_model = None
|
| 577 |
+
else:
|
| 578 |
+
self.ref_model = create_reference_model(model)
|
| 579 |
+
|
| 580 |
+
if processing_class is None:
|
| 581 |
+
raise ValueError(
|
| 582 |
+
"max_length or a processing_class must be specified when using the default DPODataCollatorWithPadding"
|
| 583 |
+
)
|
| 584 |
+
if args.max_length is None:
|
| 585 |
+
warnings.warn(
|
| 586 |
+
"When using DPODataCollatorWithPadding, you should set `max_length` in the KTOTrainer's init"
|
| 587 |
+
" it will be set to `512` by default, but you should do it yourself in the future.",
|
| 588 |
+
UserWarning,
|
| 589 |
+
)
|
| 590 |
+
max_length = 512
|
| 591 |
+
if args.max_length is not None:
|
| 592 |
+
max_length = args.max_length
|
| 593 |
+
|
| 594 |
+
if args.max_prompt_length is None:
|
| 595 |
+
warnings.warn(
|
| 596 |
+
"When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the KTOTrainer's init"
|
| 597 |
+
" it will be set to `128` by default, but you should do it yourself in the future.",
|
| 598 |
+
UserWarning,
|
| 599 |
+
)
|
| 600 |
+
max_prompt_length = 128
|
| 601 |
+
if args.max_prompt_length is not None:
|
| 602 |
+
max_prompt_length = args.max_prompt_length
|
| 603 |
+
|
| 604 |
+
max_completion_length = None
|
| 605 |
+
if args.max_completion_length is None and self.is_encoder_decoder:
|
| 606 |
+
warnings.warn(
|
| 607 |
+
"When using DPODataCollatorWithPadding with an encoder decoder architecture, you should set `max_completion_length` in the KTOTrainer's init"
|
| 608 |
+
" it will be set to `128` by default, but you should do it yourself in the future.",
|
| 609 |
+
UserWarning,
|
| 610 |
+
)
|
| 611 |
+
max_completion_length = 128
|
| 612 |
+
if args.max_completion_length is not None and self.is_encoder_decoder:
|
| 613 |
+
max_completion_length = args.max_completion_length
|
| 614 |
+
|
| 615 |
+
if data_collator is None:
|
| 616 |
+
data_collator = DPODataCollatorWithPadding(
|
| 617 |
+
pad_token_id=processing_class.pad_token_id,
|
| 618 |
+
label_pad_token_id=args.label_pad_token_id,
|
| 619 |
+
is_encoder_decoder=self.is_encoder_decoder,
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
if args.remove_unused_columns:
|
| 623 |
+
args.remove_unused_columns = False
|
| 624 |
+
# warn users
|
| 625 |
+
warnings.warn(
|
| 626 |
+
"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your KTOConfig"
|
| 627 |
+
" we have set it for you, but you should do it yourself in the future.",
|
| 628 |
+
UserWarning,
|
| 629 |
+
)
|
| 630 |
+
|
| 631 |
+
self.use_dpo_data_collator = True
|
| 632 |
+
else:
|
| 633 |
+
self.use_dpo_data_collator = False
|
| 634 |
+
|
| 635 |
+
# Disable dropout in the model and reference model
|
| 636 |
+
if args.disable_dropout:
|
| 637 |
+
disable_dropout_in_model(model)
|
| 638 |
+
if self.ref_model is not None:
|
| 639 |
+
disable_dropout_in_model(self.ref_model)
|
| 640 |
+
|
| 641 |
+
self.loss_type = args.loss_type
|
| 642 |
+
self.max_length = max_length
|
| 643 |
+
self.generate_during_eval = args.generate_during_eval
|
| 644 |
+
self.label_pad_token_id = args.label_pad_token_id
|
| 645 |
+
self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
|
| 646 |
+
self.max_prompt_length = max_prompt_length
|
| 647 |
+
self.truncation_mode = args.truncation_mode
|
| 648 |
+
self.max_completion_length = max_completion_length
|
| 649 |
+
self.processing_class = processing_class
|
| 650 |
+
self.precompute_ref_log_probs = args.precompute_ref_log_probs
|
| 651 |
+
|
| 652 |
+
# Not all losses require a KL calculation
|
| 653 |
+
self.calculate_KL = True
|
| 654 |
+
if self.loss_type in ["apo_zero_unpaired"]:
|
| 655 |
+
self.calculate_KL = False
|
| 656 |
+
|
| 657 |
+
# Since ref_logs are precomputed on the first call to get_train/eval_dataloader
|
| 658 |
+
# keep track of first called to avoid computation of future calls
|
| 659 |
+
self._precomputed_train_ref_log_probs = False
|
| 660 |
+
self._precomputed_eval_ref_log_probs = False
|
| 661 |
+
|
| 662 |
+
# metric
|
| 663 |
+
self._stored_metrics = defaultdict(lambda: defaultdict(list))
|
| 664 |
+
|
| 665 |
+
# KTO parameter
|
| 666 |
+
self.beta = args.beta
|
| 667 |
+
self.desirable_weight = args.desirable_weight
|
| 668 |
+
self.undesirable_weight = args.undesirable_weight
|
| 669 |
+
self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
|
| 670 |
+
self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
|
| 671 |
+
if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
|
| 672 |
+
warnings.warn(
|
| 673 |
+
"You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
|
| 674 |
+
"`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
|
| 675 |
+
"greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
|
| 676 |
+
"loss.",
|
| 677 |
+
UserWarning,
|
| 678 |
+
)
|
| 679 |
+
|
| 680 |
+
# The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
|
| 681 |
+
# input tensor associated with the key "input_ids". However, in KTO, the sampled data does not include the
|
| 682 |
+
# "input_ids" key. Instead, the available keys are "prompt_input_ids" and "completion_input_ids". As a result,
|
| 683 |
+
# the trainer issues the warning: "Could not estimate the number of tokens of the input, floating-point
|
| 684 |
+
# operations will not be computed." To suppress this warning, we set the "estimate_tokens" key in the model's
|
| 685 |
+
# "warnings_issued" dictionary to True. This acts as a flag to indicate that the warning has already been
|
| 686 |
+
# issued.
|
| 687 |
+
model.warnings_issued["estimate_tokens"] = True
|
| 688 |
+
|
| 689 |
+
# Compute that only on the main process for faster data processing.
|
| 690 |
+
# see: https://github.com/huggingface/trl/pull/1255
|
| 691 |
+
with PartialState().local_main_process_first():
|
| 692 |
+
# Extract the prompt if needed
|
| 693 |
+
train_dataset = train_dataset.map(
|
| 694 |
+
maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from train dataset"
|
| 695 |
+
)
|
| 696 |
+
# Unpair the dataset if needed
|
| 697 |
+
train_dataset = maybe_unpair_preference_dataset(
|
| 698 |
+
train_dataset, args.dataset_num_proc, desc="Unpairing train dataset"
|
| 699 |
+
)
|
| 700 |
+
# Apply the chat template if needed
|
| 701 |
+
train_dataset = train_dataset.map(
|
| 702 |
+
maybe_apply_chat_template,
|
| 703 |
+
fn_kwargs={"tokenizer": processing_class},
|
| 704 |
+
num_proc=args.dataset_num_proc,
|
| 705 |
+
desc="Applying chat template to train dataset",
|
| 706 |
+
)
|
| 707 |
+
if eval_dataset is not None:
|
| 708 |
+
eval_dataset = eval_dataset.map(
|
| 709 |
+
maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from eval dataset"
|
| 710 |
+
)
|
| 711 |
+
eval_dataset = maybe_unpair_preference_dataset(
|
| 712 |
+
eval_dataset, args.dataset_num_proc, desc="Unpairing eval dataset"
|
| 713 |
+
)
|
| 714 |
+
eval_dataset = eval_dataset.map(
|
| 715 |
+
maybe_apply_chat_template,
|
| 716 |
+
fn_kwargs={"tokenizer": processing_class},
|
| 717 |
+
num_proc=args.dataset_num_proc,
|
| 718 |
+
desc="Applying chat template to eval dataset",
|
| 719 |
+
)
|
| 720 |
+
|
| 721 |
+
# Tokenize and prepare the training datasets
|
| 722 |
+
train_dataset = train_dataset.map(
|
| 723 |
+
_tokenize,
|
| 724 |
+
batched=True,
|
| 725 |
+
fn_kwargs={"tokenizer": self.processing_class},
|
| 726 |
+
num_proc=args.dataset_num_proc,
|
| 727 |
+
desc="Tokenizing train dataset",
|
| 728 |
+
)
|
| 729 |
+
|
| 730 |
+
fn_kwargs = {
|
| 731 |
+
"prefix": "",
|
| 732 |
+
"is_encoder_decoder": self.is_encoder_decoder,
|
| 733 |
+
"tokenizer": self.processing_class,
|
| 734 |
+
"max_length": self.max_length,
|
| 735 |
+
"truncation_mode": self.truncation_mode,
|
| 736 |
+
"label_pad_token_id": self.label_pad_token_id,
|
| 737 |
+
"max_prompt_length": self.max_prompt_length,
|
| 738 |
+
"max_completion_length": self.max_completion_length,
|
| 739 |
+
}
|
| 740 |
+
|
| 741 |
+
train_dataset = train_dataset.map(
|
| 742 |
+
_process_tokens,
|
| 743 |
+
fn_kwargs=fn_kwargs,
|
| 744 |
+
num_proc=args.dataset_num_proc,
|
| 745 |
+
desc="Processing tokenized train dataset",
|
| 746 |
+
)
|
| 747 |
+
|
| 748 |
+
# Tokenize and prepare the eval datasets
|
| 749 |
+
if eval_dataset is not None:
|
| 750 |
+
eval_dataset = eval_dataset.map(
|
| 751 |
+
_tokenize,
|
| 752 |
+
fn_kwargs={"tokenizer": self.processing_class},
|
| 753 |
+
batched=True,
|
| 754 |
+
num_proc=args.dataset_num_proc,
|
| 755 |
+
desc="Tokenizing eval dataset",
|
| 756 |
+
)
|
| 757 |
+
|
| 758 |
+
eval_dataset = eval_dataset.map(
|
| 759 |
+
_process_tokens,
|
| 760 |
+
fn_kwargs=fn_kwargs,
|
| 761 |
+
num_proc=args.dataset_num_proc,
|
| 762 |
+
desc="Processing tokenized eval dataset",
|
| 763 |
+
)
|
| 764 |
+
|
| 765 |
+
# Get KL datasets if needed
|
| 766 |
+
if self.calculate_KL:
|
| 767 |
+
if args.per_device_train_batch_size <= 1:
|
| 768 |
+
raise ValueError(
|
| 769 |
+
"Actual (not effective) batch size must be > 1. KTO will not work properly because the KL term will be equivalent to the implied reward."
|
| 770 |
+
)
|
| 771 |
+
|
| 772 |
+
# create pairs for estimating the KL term by flipping the matched pairs in each batch of size total_batch_size
|
| 773 |
+
# i.e., (x_1, y_1), ..., (x_n, y_n) --> (x_1, y_n), ..., (x_n, y_1) = (x'_1, y'_1), ..., (x'_n, y'_n)
|
| 774 |
+
train_kl_dataset = train_dataset.map(
|
| 775 |
+
_get_kl_dataset,
|
| 776 |
+
batched=True,
|
| 777 |
+
batch_size=args.per_device_train_batch_size,
|
| 778 |
+
num_proc=args.dataset_num_proc,
|
| 779 |
+
desc="Extracting KL train dataset",
|
| 780 |
+
)
|
| 781 |
+
|
| 782 |
+
fn_kwargs["prefix"] = "KL_"
|
| 783 |
+
train_kl_dataset = train_kl_dataset.map(
|
| 784 |
+
_process_tokens,
|
| 785 |
+
fn_kwargs=fn_kwargs,
|
| 786 |
+
num_proc=args.dataset_num_proc,
|
| 787 |
+
remove_columns=[c for c in train_kl_dataset.column_names if c in train_dataset.column_names],
|
| 788 |
+
desc="Processing tokenized train KL dataset",
|
| 789 |
+
)
|
| 790 |
+
|
| 791 |
+
# merge the datasets
|
| 792 |
+
train_dataset = concatenate_datasets([train_dataset, train_kl_dataset], axis=1)
|
| 793 |
+
|
| 794 |
+
if eval_dataset is not None:
|
| 795 |
+
# Get KL dataset
|
| 796 |
+
eval_kl_dataset = eval_dataset.map(
|
| 797 |
+
_get_kl_dataset,
|
| 798 |
+
batched=True,
|
| 799 |
+
batch_size=args.per_device_train_batch_size,
|
| 800 |
+
num_proc=args.dataset_num_proc,
|
| 801 |
+
desc="Extracting eval KL dataset",
|
| 802 |
+
)
|
| 803 |
+
|
| 804 |
+
eval_kl_dataset = eval_kl_dataset.map(
|
| 805 |
+
_process_tokens,
|
| 806 |
+
fn_kwargs=fn_kwargs,
|
| 807 |
+
num_proc=args.dataset_num_proc,
|
| 808 |
+
remove_columns=[c for c in eval_kl_dataset.column_names if c in eval_dataset.column_names],
|
| 809 |
+
desc="Processing tokenized eval KL dataset",
|
| 810 |
+
)
|
| 811 |
+
|
| 812 |
+
# merge the datasets
|
| 813 |
+
eval_dataset = concatenate_datasets([eval_dataset, eval_kl_dataset], axis=1)
|
| 814 |
+
|
| 815 |
+
# calculate dataset desirability balance
|
| 816 |
+
num_desirable = max(sum(train_dataset["label"]), 1)
|
| 817 |
+
num_undesirable = max(len(train_dataset["label"]) - num_desirable, 1) # "label" is binary
|
| 818 |
+
|
| 819 |
+
if num_desirable != num_undesirable:
|
| 820 |
+
# The lower and upper bounds come from Eq. (8) of https://huggingface.co/papers/2402.01306
|
| 821 |
+
des_weight_lower_bound = round((num_undesirable * self.undesirable_weight / num_desirable) * 1, 2)
|
| 822 |
+
des_weight_upper_bound = round((num_undesirable * self.undesirable_weight / num_desirable) * 1.33, 2)
|
| 823 |
+
und_weight_lower_bound = round((num_desirable * self.desirable_weight / num_undesirable) / 1.33, 2)
|
| 824 |
+
und_weight_upper_bound = round((num_desirable * self.desirable_weight / num_undesirable) / 1, 2)
|
| 825 |
+
|
| 826 |
+
des_weight_in_range = des_weight_lower_bound <= self.desirable_weight <= des_weight_upper_bound
|
| 827 |
+
und_weight_in_range = und_weight_lower_bound <= self.undesirable_weight <= und_weight_upper_bound
|
| 828 |
+
|
| 829 |
+
if not (des_weight_in_range or und_weight_in_range):
|
| 830 |
+
warnings.warn(
|
| 831 |
+
"You have different amounts of desirable/positive and undesirable/negative examples but the "
|
| 832 |
+
"weights on the desirable and undesirable losses don't seem to be in an ideal range. Based "
|
| 833 |
+
f"on your data, we recommend EITHER "
|
| 834 |
+
f"desirable_weight in [{des_weight_lower_bound}, {des_weight_upper_bound}] or "
|
| 835 |
+
f"undesirable_weight in [{und_weight_lower_bound}, {und_weight_upper_bound}] (but NOT BOTH). "
|
| 836 |
+
"See the documentation on how to optimally set these weights.",
|
| 837 |
+
UserWarning,
|
| 838 |
+
)
|
| 839 |
+
|
| 840 |
+
super().__init__(
|
| 841 |
+
model=model,
|
| 842 |
+
args=args,
|
| 843 |
+
data_collator=data_collator,
|
| 844 |
+
train_dataset=train_dataset,
|
| 845 |
+
eval_dataset=eval_dataset,
|
| 846 |
+
processing_class=processing_class,
|
| 847 |
+
model_init=model_init,
|
| 848 |
+
compute_metrics=compute_metrics,
|
| 849 |
+
callbacks=callbacks,
|
| 850 |
+
optimizers=optimizers,
|
| 851 |
+
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
|
| 852 |
+
)
|
| 853 |
+
|
| 854 |
+
# Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
|
| 855 |
+
# model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
|
| 856 |
+
# self.model_accepts_loss_kwargs to False to enable scaling.
|
| 857 |
+
self.model_accepts_loss_kwargs = False
|
| 858 |
+
|
| 859 |
+
# Add tags for models that have been loaded with the correct transformers version
|
| 860 |
+
if hasattr(self.model, "add_model_tags"):
|
| 861 |
+
self.model.add_model_tags(self._tag_names)
|
| 862 |
+
|
| 863 |
+
if not hasattr(self, "accelerator"):
|
| 864 |
+
raise AttributeError(
|
| 865 |
+
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
|
| 866 |
+
)
|
| 867 |
+
|
| 868 |
+
# Deepspeed Zero-3 does not support precompute_ref_log_probs
|
| 869 |
+
if self.is_deepspeed_enabled:
|
| 870 |
+
if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs:
|
| 871 |
+
raise ValueError(
|
| 872 |
+
"You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`."
|
| 873 |
+
)
|
| 874 |
+
|
| 875 |
+
if self.ref_model is None:
|
| 876 |
+
if not (self.is_peft_model or self.precompute_ref_log_probs):
|
| 877 |
+
raise ValueError(
|
| 878 |
+
"No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`"
|
| 879 |
+
)
|
| 880 |
+
else:
|
| 881 |
+
if self.is_deepspeed_enabled:
|
| 882 |
+
self.ref_model = self._prepare_deepspeed(self.ref_model)
|
| 883 |
+
else:
|
| 884 |
+
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
|
| 885 |
+
|
| 886 |
+
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
|
| 887 |
+
# Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
|
| 888 |
+
deepspeed_plugin = self.accelerator.state.deepspeed_plugin
|
| 889 |
+
config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)
|
| 890 |
+
|
| 891 |
+
if model is not None:
|
| 892 |
+
if hasattr(model, "config"):
|
| 893 |
+
hidden_size = (
|
| 894 |
+
max(model.config.hidden_sizes)
|
| 895 |
+
if getattr(model.config, "hidden_sizes", None)
|
| 896 |
+
else getattr(model.config, "hidden_size", None)
|
| 897 |
+
)
|
| 898 |
+
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
|
| 899 |
+
# Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
|
| 900 |
+
# This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
|
| 901 |
+
config_kwargs.update(
|
| 902 |
+
{
|
| 903 |
+
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
|
| 904 |
+
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
|
| 905 |
+
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
|
| 906 |
+
}
|
| 907 |
+
)
|
| 908 |
+
|
| 909 |
+
# If ZeRO-3 is used, we shard both the active and reference model.
|
| 910 |
+
# Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
|
| 911 |
+
if config_kwargs["zero_optimization"]["stage"] != 3:
|
| 912 |
+
config_kwargs["zero_optimization"]["stage"] = 0
|
| 913 |
+
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
|
| 914 |
+
model.eval()
|
| 915 |
+
return model
|
| 916 |
+
|
| 917 |
+
@contextmanager
|
| 918 |
+
def null_ref_context(self):
|
| 919 |
+
"""Context manager for handling null reference model (that is, peft adapter manipulation)."""
|
| 920 |
+
with (
|
| 921 |
+
self.accelerator.unwrap_model(self.model).disable_adapter()
|
| 922 |
+
if self.is_peft_model and not self.ref_adapter_name
|
| 923 |
+
else nullcontext()
|
| 924 |
+
):
|
| 925 |
+
if self.ref_adapter_name:
|
| 926 |
+
self.model.set_adapter(self.ref_adapter_name)
|
| 927 |
+
yield
|
| 928 |
+
if self.ref_adapter_name:
|
| 929 |
+
self.model.set_adapter(self.model_adapter_name or "default")
|
| 930 |
+
|
| 931 |
+
def get_train_dataloader(self) -> DataLoader:
|
| 932 |
+
"""
|
| 933 |
+
Returns the training [`~torch.utils.data.DataLoader`].
|
| 934 |
+
|
| 935 |
+
Subclass of transformers.src.transformers.trainer.get_train_dataloader to precompute `ref_log_probs`.
|
| 936 |
+
"""
|
| 937 |
+
|
| 938 |
+
if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs:
|
| 939 |
+
dataloader_params = {
|
| 940 |
+
"batch_size": self.args.per_device_train_batch_size,
|
| 941 |
+
"collate_fn": self.data_collator,
|
| 942 |
+
"num_workers": self.args.dataloader_num_workers,
|
| 943 |
+
"pin_memory": self.args.dataloader_pin_memory,
|
| 944 |
+
"shuffle": False,
|
| 945 |
+
}
|
| 946 |
+
|
| 947 |
+
# prepare dataloader
|
| 948 |
+
data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params))
|
| 949 |
+
reference_completion_logps = []
|
| 950 |
+
reference_KL_logps = []
|
| 951 |
+
|
| 952 |
+
for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"):
|
| 953 |
+
reference_completion_logp, reference_KL_logp = self.compute_reference_log_probs(padded_batch)
|
| 954 |
+
|
| 955 |
+
reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp)
|
| 956 |
+
reference_completion_logps.append(reference_completion_logp.cpu())
|
| 957 |
+
|
| 958 |
+
if self.calculate_KL:
|
| 959 |
+
reference_KL_logp = self.accelerator.gather_for_metrics(reference_KL_logp)
|
| 960 |
+
reference_KL_logps.append(reference_KL_logp.cpu())
|
| 961 |
+
|
| 962 |
+
self.train_dataset = self.train_dataset.add_column(
|
| 963 |
+
name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy()
|
| 964 |
+
)
|
| 965 |
+
|
| 966 |
+
if self.calculate_KL:
|
| 967 |
+
self.train_dataset = self.train_dataset.add_column(
|
| 968 |
+
name="reference_KL_logps", column=torch.cat(reference_KL_logps).float().numpy()
|
| 969 |
+
)
|
| 970 |
+
|
| 971 |
+
self._precomputed_train_ref_log_probs = True
|
| 972 |
+
|
| 973 |
+
return super().get_train_dataloader()
|
| 974 |
+
|
| 975 |
+
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
|
| 976 |
+
"""
|
| 977 |
+
Returns the evaluation [`~torch.utils.data.DataLoader`].
|
| 978 |
+
|
| 979 |
+
Subclass of transformers.src.transformers.trainer.get_eval_dataloader to precompute `ref_log_probs`.
|
| 980 |
+
|
| 981 |
+
Args:
|
| 982 |
+
eval_dataset (`torch.utils.data.Dataset`, *optional*):
|
| 983 |
+
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
|
| 984 |
+
by the `model.forward()` method are automatically removed. It must implement `__len__`.
|
| 985 |
+
"""
|
| 986 |
+
if eval_dataset is None and self.eval_dataset is None:
|
| 987 |
+
raise ValueError("Trainer: evaluation requires an eval_dataset.")
|
| 988 |
+
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
|
| 989 |
+
|
| 990 |
+
if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs:
|
| 991 |
+
dataloader_params = {
|
| 992 |
+
"batch_size": self.args.per_device_eval_batch_size,
|
| 993 |
+
"collate_fn": self.data_collator,
|
| 994 |
+
"num_workers": self.args.dataloader_num_workers,
|
| 995 |
+
"pin_memory": self.args.dataloader_pin_memory,
|
| 996 |
+
"shuffle": False,
|
| 997 |
+
}
|
| 998 |
+
|
| 999 |
+
# prepare dataloader
|
| 1000 |
+
data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params))
|
| 1001 |
+
|
| 1002 |
+
reference_completion_logps = []
|
| 1003 |
+
reference_KL_logps = []
|
| 1004 |
+
|
| 1005 |
+
for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"):
|
| 1006 |
+
reference_completion_logp, reference_KL_logp = self.compute_reference_log_probs(padded_batch)
|
| 1007 |
+
|
| 1008 |
+
reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp)
|
| 1009 |
+
reference_completion_logps.append(reference_completion_logp.cpu())
|
| 1010 |
+
|
| 1011 |
+
if self.calculate_KL:
|
| 1012 |
+
reference_KL_logp = self.accelerator.gather_for_metrics(reference_KL_logp)
|
| 1013 |
+
reference_KL_logps.append(reference_KL_logp.cpu())
|
| 1014 |
+
|
| 1015 |
+
eval_dataset = eval_dataset.add_column(
|
| 1016 |
+
name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy()
|
| 1017 |
+
)
|
| 1018 |
+
if self.calculate_KL:
|
| 1019 |
+
eval_dataset = eval_dataset.add_column(
|
| 1020 |
+
name="reference_KL_logps", column=torch.cat(reference_KL_logps).float().numpy()
|
| 1021 |
+
)
|
| 1022 |
+
|
| 1023 |
+
# Save calculated reference_chosen_logps and reference_rejected_logps to the eval_dataset for subsequent runs
|
| 1024 |
+
if self.eval_dataset is not None:
|
| 1025 |
+
self.eval_dataset = eval_dataset
|
| 1026 |
+
self._precomputed_eval_ref_log_probs = True
|
| 1027 |
+
|
| 1028 |
+
return super().get_eval_dataloader(eval_dataset=eval_dataset)
|
| 1029 |
+
|
| 1030 |
+
    def compute_reference_log_probs(self, padded_batch: dict) -> tuple:
        """Computes log probabilities of the reference model for a single padded batch of a KTO specific dataset.

        Returns:
            A tuple `(completion_logps, KL_logps)`; `KL_logps` is `None` when `self.calculate_KL` is False.
        """
        with torch.no_grad():
            if self.ref_model is None:
                # No explicit reference model: reuse self.model with adapters disabled
                # via null_ref_context() so it behaves like the frozen reference.
                with self.null_ref_context():
                    if self.is_encoder_decoder:
                        completion_logits = self.model(
                            padded_batch["prompt_input_ids"],
                            attention_mask=padded_batch["prompt_attention_mask"],
                            decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
                            labels=padded_batch["completion_labels"],
                        ).logits

                        if self.calculate_KL:
                            KL_logits = self.model(
                                padded_batch["KL_prompt_input_ids"],
                                attention_mask=padded_batch["KL_prompt_attention_mask"],
                                decoder_input_ids=padded_batch.get("KL_completion_decoder_input_ids"),
                                labels=padded_batch["KL_completion_labels"],
                            ).logits
                    else:
                        # Decoder-only: the completion tensors already contain prompt + completion.
                        completion_logits = self.model(
                            padded_batch["completion_input_ids"],
                            attention_mask=padded_batch["completion_attention_mask"],
                        ).logits

                        if self.calculate_KL:
                            KL_logits = self.model(
                                padded_batch["KL_completion_input_ids"],
                                attention_mask=padded_batch["KL_completion_attention_mask"],
                            ).logits
            else:
                # Same four cases, but against the dedicated reference model.
                if self.is_encoder_decoder:
                    completion_logits = self.ref_model(
                        padded_batch["prompt_input_ids"],
                        attention_mask=padded_batch["prompt_attention_mask"],
                        decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
                        labels=padded_batch["completion_labels"],
                    ).logits

                    if self.calculate_KL:
                        KL_logits = self.ref_model(
                            padded_batch["KL_prompt_input_ids"],
                            attention_mask=padded_batch["KL_prompt_attention_mask"],
                            decoder_input_ids=padded_batch.get("KL_completion_decoder_input_ids"),
                            labels=padded_batch["KL_completion_labels"],
                        ).logits
                else:
                    completion_logits = self.ref_model(
                        padded_batch["completion_input_ids"], attention_mask=padded_batch["completion_attention_mask"]
                    ).logits

                    if self.calculate_KL:
                        KL_logits = self.ref_model(
                            padded_batch["KL_completion_input_ids"],
                            attention_mask=padded_batch["KL_completion_attention_mask"],
                        ).logits

            # Sum (not average) of per-token log-probs over non-padded completion tokens.
            completion_logps = self.get_batch_logps(
                completion_logits,
                padded_batch["completion_labels"],
                average_log_prob=False,
                is_encoder_decoder=self.is_encoder_decoder,
                label_pad_token_id=self.label_pad_token_id,
            )

            if self.calculate_KL:
                KL_logps = self.get_batch_logps(
                    KL_logits,
                    padded_batch["KL_completion_labels"],
                    average_log_prob=False,
                    is_encoder_decoder=self.is_encoder_decoder,
                    label_pad_token_id=self.label_pad_token_id,
                )
            else:
                KL_logps = None

        return completion_logps, KL_logps
|
| 1108 |
+
|
| 1109 |
+
@staticmethod
|
| 1110 |
+
def get_batch_logps(
|
| 1111 |
+
logits: torch.FloatTensor,
|
| 1112 |
+
labels: torch.LongTensor,
|
| 1113 |
+
average_log_prob: bool = False,
|
| 1114 |
+
label_pad_token_id: int = -100,
|
| 1115 |
+
is_encoder_decoder: bool = False,
|
| 1116 |
+
) -> torch.FloatTensor:
|
| 1117 |
+
"""Compute the log probabilities of the given labels under the given logits.
|
| 1118 |
+
|
| 1119 |
+
Args:
|
| 1120 |
+
logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
|
| 1121 |
+
labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
|
| 1122 |
+
average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.
|
| 1123 |
+
|
| 1124 |
+
Returns:
|
| 1125 |
+
A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
|
| 1126 |
+
"""
|
| 1127 |
+
if logits.shape[:-1] != labels.shape:
|
| 1128 |
+
raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
|
| 1129 |
+
|
| 1130 |
+
if not is_encoder_decoder:
|
| 1131 |
+
labels = labels[:, 1:].clone()
|
| 1132 |
+
logits = logits[:, :-1, :]
|
| 1133 |
+
else:
|
| 1134 |
+
# Fixes end-dec RuntimeError
|
| 1135 |
+
labels = labels.clone()
|
| 1136 |
+
|
| 1137 |
+
loss_mask = labels != label_pad_token_id
|
| 1138 |
+
|
| 1139 |
+
# dummy token; we'll ignore the losses on these tokens later
|
| 1140 |
+
labels[labels == label_pad_token_id] = 0
|
| 1141 |
+
|
| 1142 |
+
per_token_logps = selective_log_softmax(logits, labels)
|
| 1143 |
+
|
| 1144 |
+
if average_log_prob:
|
| 1145 |
+
return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
|
| 1146 |
+
else:
|
| 1147 |
+
return (per_token_logps * loss_mask).sum(-1)
|
| 1148 |
+
|
| 1149 |
+
    def forward(
        self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
    ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Run `model` on a KTO batch and split completion log-probs/logits by example label.

        Returns `(chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps)`,
        plus `outputs.aux_loss` appended when `self.aux_loss_enabled` is set.
        """
        if self.calculate_KL:
            # NOTE: this first assignment is immediately overwritten below; kept as-is.
            KL_logps = None
            KL_model_kwargs = (
                {
                    "input_ids": batch["KL_prompt_input_ids"],
                    "attention_mask": batch["KL_prompt_attention_mask"],
                    "labels": batch["KL_completion_labels"],
                    "decoder_input_ids": batch.get("KL_completion_decoder_input_ids"),
                }
                if self.is_encoder_decoder
                else {
                    "input_ids": batch["KL_completion_input_ids"],
                    "attention_mask": batch["KL_completion_attention_mask"],
                }
            )
            # KL term is an estimate only; no gradients flow through it.
            with torch.no_grad():
                KL_logits = model(
                    **KL_model_kwargs,
                ).logits

            KL_logps = self.get_batch_logps(
                KL_logits,
                batch["KL_completion_labels"],
                average_log_prob=False,
                is_encoder_decoder=self.is_encoder_decoder,
                label_pad_token_id=self.label_pad_token_id,
            )
        else:
            KL_logps = None

        model_kwargs = (
            {
                "labels": batch["completion_labels"],
                "decoder_input_ids": batch.get("completion_decoder_input_ids"),
            }
            if self.is_encoder_decoder
            else {}
        )
        if self.aux_loss_enabled:
            # MoE models: request router logits so the auxiliary load-balancing loss is computed.
            model_kwargs["output_router_logits"] = True

        outputs = model(
            batch["completion_input_ids"],
            attention_mask=batch["completion_attention_mask"],
            **model_kwargs,
        )
        completion_logits = outputs.logits

        completion_logps = self.get_batch_logps(
            completion_logits,
            batch["completion_labels"],
            average_log_prob=False,
            is_encoder_decoder=self.is_encoder_decoder,
            label_pad_token_id=self.label_pad_token_id,
        )

        if completion_logps.shape[0] != len(batch["label"]):
            raise ValueError(
                "There is a mismatch between the number of examples in this batch and the number of "
                "examples for which an output sequence was predicted."
            )

        # KTO is unpaired: each example is either desirable (label True) or undesirable (False).
        chosen_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is True]
        rejected_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is False]

        chosen_logps = completion_logps[chosen_idx, ...]
        rejected_logps = completion_logps[rejected_idx, ...]

        chosen_logits = completion_logits[chosen_idx, ...]
        rejected_logits = completion_logits[rejected_idx, ...]

        if self.aux_loss_enabled:
            return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps, outputs.aux_loss)
        else:
            return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps)
|
| 1227 |
+
|
| 1228 |
+
def kto_loss(
|
| 1229 |
+
self,
|
| 1230 |
+
policy_chosen_logps: torch.FloatTensor,
|
| 1231 |
+
policy_rejected_logps: torch.FloatTensor,
|
| 1232 |
+
policy_KL_logps: torch.FloatTensor,
|
| 1233 |
+
reference_chosen_logps: torch.FloatTensor,
|
| 1234 |
+
reference_rejected_logps: torch.FloatTensor,
|
| 1235 |
+
reference_KL_logps: torch.FloatTensor,
|
| 1236 |
+
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
|
| 1237 |
+
"""Compute the KTO loss for a batch of policy and reference model log probabilities.
|
| 1238 |
+
|
| 1239 |
+
Args:
|
| 1240 |
+
policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (num(chosen) in batch_size,)
|
| 1241 |
+
policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (num(rejected) in batch_size,)
|
| 1242 |
+
policy_KL_logps: Log probabilities of the policy model for the KL responses. Shape: (batch_size,)
|
| 1243 |
+
reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (num(chosen) in batch_size,)
|
| 1244 |
+
reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (num(rejected) in batch_size,)
|
| 1245 |
+
reference_KL_logps: Log probabilities of the reference model for the KL responses. Shape: (batch_size,)
|
| 1246 |
+
|
| 1247 |
+
Returns:
|
| 1248 |
+
A tuple of four tensors: (losses, chosen_rewards, rejected_rewards, KL).
|
| 1249 |
+
The losses tensor contains the KTO loss for each example in the batch.
|
| 1250 |
+
The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
|
| 1251 |
+
The KL tensor contains the detached KL divergence estimate between the policy and reference models.
|
| 1252 |
+
"""
|
| 1253 |
+
if self.calculate_KL:
|
| 1254 |
+
kl = (policy_KL_logps - reference_KL_logps).mean().detach()
|
| 1255 |
+
kl = self.accelerator.gather_for_metrics(kl).mean().clamp(min=0)
|
| 1256 |
+
else:
|
| 1257 |
+
kl = torch.zeros(1).to(policy_chosen_logps.device)
|
| 1258 |
+
|
| 1259 |
+
# Chosen losses
|
| 1260 |
+
if policy_chosen_logps.shape[0] != 0 or reference_chosen_logps.shape[0] != 0:
|
| 1261 |
+
chosen_logratios = policy_chosen_logps - reference_chosen_logps
|
| 1262 |
+
|
| 1263 |
+
if self.loss_type == "kto":
|
| 1264 |
+
# Eqn (7) of the KTO paper (https://huggingface.co/papers/2402.01306)
|
| 1265 |
+
chosen_losses = 1 - F.sigmoid(self.beta * (chosen_logratios - kl))
|
| 1266 |
+
elif self.loss_type == "apo_zero_unpaired":
|
| 1267 |
+
# Unpaired variant of Eqn (7) of the APO paper (https://huggingface.co/papers/2408.06266)
|
| 1268 |
+
# Use this loss when you believe the chosen outputs are better than your model's default output
|
| 1269 |
+
chosen_losses = 1 - F.sigmoid(self.beta * chosen_logratios)
|
| 1270 |
+
|
| 1271 |
+
chosen_rewards = self.beta * chosen_logratios.detach()
|
| 1272 |
+
|
| 1273 |
+
else:
|
| 1274 |
+
# lists can't be empty -- if they are, then accelerate.gather will hang
|
| 1275 |
+
chosen_losses = torch.Tensor([]).to(self.accelerator.device)
|
| 1276 |
+
chosen_rewards = torch.Tensor([]).to(self.accelerator.device)
|
| 1277 |
+
|
| 1278 |
+
# Rejected losses
|
| 1279 |
+
if policy_rejected_logps.shape[0] != 0 or reference_rejected_logps.shape[0] != 0:
|
| 1280 |
+
rejected_logratios = policy_rejected_logps - reference_rejected_logps
|
| 1281 |
+
|
| 1282 |
+
if self.loss_type == "kto":
|
| 1283 |
+
rejected_losses = 1 - F.sigmoid(self.beta * (kl - rejected_logratios))
|
| 1284 |
+
elif self.loss_type == "apo_zero_unpaired":
|
| 1285 |
+
rejected_losses = F.sigmoid(self.beta * rejected_logratios)
|
| 1286 |
+
|
| 1287 |
+
rejected_rewards = self.beta * rejected_logratios.detach()
|
| 1288 |
+
else:
|
| 1289 |
+
# lists can't be empty -- if they are, then accelerate.gather will hang
|
| 1290 |
+
rejected_losses = torch.Tensor([]).to(self.accelerator.device)
|
| 1291 |
+
rejected_rewards = torch.Tensor([]).to(self.accelerator.device)
|
| 1292 |
+
|
| 1293 |
+
losses = torch.cat(
|
| 1294 |
+
(self.desirable_weight * chosen_losses, self.undesirable_weight * rejected_losses),
|
| 1295 |
+
0,
|
| 1296 |
+
)
|
| 1297 |
+
|
| 1298 |
+
return losses, chosen_rewards, rejected_rewards, kl
|
| 1299 |
+
|
| 1300 |
+
    def get_batch_loss_metrics(
        self,
        model,
        batch: dict[str, Union[list, torch.LongTensor]],
    ):
        """Compute the KTO loss and other metrics for the given batch of inputs for train or test.

        Returns `(loss, metrics)` where `metrics` holds gathered per-batch sums/counts that
        `log` later averages and flushes.
        """
        metrics = {}
        batch = {k: (v.to(self.accelerator.device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}

        forward_output = self.forward(model, batch)
        (
            policy_chosen_logps,
            policy_rejected_logps,
            policy_chosen_logits,
            policy_rejected_logits,
            policy_KL_logps,
        ) = forward_output[:5]
        if self.aux_loss_enabled:
            # forward() appends aux_loss as a sixth element when enabled.
            aux_loss = forward_output[5]

        # if reference_logps in batch use them, otherwise use the reference model
        if "reference_logps" in batch:
            # Precomputed reference log-probs (see precompute_ref_log_probs); split by label.
            chosen_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is True]
            rejected_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is False]

            reference_chosen_logps = batch["reference_logps"][chosen_idx, ...]
            reference_rejected_logps = batch["reference_logps"][rejected_idx, ...]
            if self.calculate_KL:
                reference_KL_logps = batch["reference_KL_logps"]
            else:
                reference_KL_logps = None
        else:
            with torch.no_grad():
                if self.ref_model is None:
                    # Adapters disabled: the base model acts as the reference.
                    with self.null_ref_context():
                        (
                            reference_chosen_logps,
                            reference_rejected_logps,
                            _,
                            _,
                            reference_KL_logps,
                        ) = self.forward(self.model, batch)[:5]
                else:
                    (
                        reference_chosen_logps,
                        reference_rejected_logps,
                        _,
                        _,
                        reference_KL_logps,
                    ) = self.forward(self.ref_model, batch)[:5]

        losses, chosen_rewards, rejected_rewards, kl = self.kto_loss(
            policy_chosen_logps,
            policy_rejected_logps,
            policy_KL_logps,
            reference_chosen_logps,
            reference_rejected_logps,
            reference_KL_logps,
        )
        metrics["kl"] = kl.item()

        # Gather counts across processes so metrics are only emitted when a split is non-empty
        # anywhere (empty local tensors are fine; gather on them must still run on every rank).
        num_chosen = torch.Tensor([len(chosen_rewards)]).to(self.accelerator.device)
        num_rejected = torch.Tensor([len(rejected_rewards)]).to(self.accelerator.device)

        all_num_chosen = self.accelerator.gather_for_metrics(num_chosen).sum().item()
        all_num_rejected = self.accelerator.gather_for_metrics(num_rejected).sum().item()

        if all_num_chosen > 0:
            metrics["rewards/chosen_sum"] = (
                self.accelerator.gather_for_metrics(chosen_rewards.nansum()).nansum().item()
            )
            metrics["logps/chosen_sum"] = (
                self.accelerator.gather_for_metrics(policy_chosen_logps.nansum()).nansum().item()
            )
            metrics["logits/chosen_sum"] = (
                self.accelerator.gather_for_metrics(policy_chosen_logits.nansum()).nansum().item()
            )
            metrics["count/chosen"] = all_num_chosen

        if all_num_rejected > 0:
            metrics["rewards/rejected_sum"] = (
                self.accelerator.gather_for_metrics(rejected_rewards.nansum()).nansum().item()
            )
            metrics["logps/rejected_sum"] = (
                self.accelerator.gather_for_metrics(policy_rejected_logps.nansum()).nansum().item()
            )
            metrics["logits/rejected_sum"] = (
                self.accelerator.gather_for_metrics(policy_rejected_logits.nansum()).nansum().item()
            )
            metrics["count/rejected"] = all_num_rejected

        # nanmean skips NaN losses from degenerate examples rather than poisoning the batch loss.
        loss = losses.nanmean()
        if self.aux_loss_enabled:
            loss += self.aux_loss_coef * aux_loss

        return loss, metrics
|
| 1396 |
+
|
| 1397 |
+
def compute_loss(
|
| 1398 |
+
self,
|
| 1399 |
+
model: Union[PreTrainedModel, nn.Module],
|
| 1400 |
+
inputs: dict[str, Union[torch.Tensor, Any]],
|
| 1401 |
+
return_outputs=False,
|
| 1402 |
+
num_items_in_batch=None,
|
| 1403 |
+
) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
|
| 1404 |
+
compute_loss_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
|
| 1405 |
+
|
| 1406 |
+
with compute_loss_context_manager:
|
| 1407 |
+
loss, metrics = self.get_batch_loss_metrics(model, inputs)
|
| 1408 |
+
|
| 1409 |
+
# Make sure to move the loss to the device the original accumulating loss is at back in the `Trainer` class:
|
| 1410 |
+
loss = loss.to(self.args.device)
|
| 1411 |
+
# force log the metrics
|
| 1412 |
+
if self.accelerator.is_main_process:
|
| 1413 |
+
self.store_metrics(metrics, train_eval="train")
|
| 1414 |
+
|
| 1415 |
+
if return_outputs:
|
| 1416 |
+
return (loss, metrics)
|
| 1417 |
+
return loss
|
| 1418 |
+
|
| 1419 |
+
def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
|
| 1420 |
+
for key, value in metrics.items():
|
| 1421 |
+
self._stored_metrics[train_eval][key].append(value)
|
| 1422 |
+
|
| 1423 |
+
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
|
| 1424 |
+
if self.train_dataset is None or not has_length(self.train_dataset):
|
| 1425 |
+
return None
|
| 1426 |
+
return SequentialSampler(self.train_dataset)
|
| 1427 |
+
|
| 1428 |
+
def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[str, str]:
|
| 1429 |
+
"""Generate samples from the model and reference model for the given batch of inputs."""
|
| 1430 |
+
|
| 1431 |
+
# If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
|
| 1432 |
+
# the torch cuda amp context manager as some hidden states are silently casted to full precision.
|
| 1433 |
+
generate_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
|
| 1434 |
+
|
| 1435 |
+
with generate_context_manager:
|
| 1436 |
+
policy_output = model.generate(
|
| 1437 |
+
input_ids=batch["prompt_input_ids"],
|
| 1438 |
+
attention_mask=batch["prompt_attention_mask"],
|
| 1439 |
+
max_length=self.max_length,
|
| 1440 |
+
do_sample=True,
|
| 1441 |
+
pad_token_id=self.processing_class.pad_token_id,
|
| 1442 |
+
)
|
| 1443 |
+
|
| 1444 |
+
# if reference_output in batch use that otherwise use the reference model
|
| 1445 |
+
if "reference_output" in batch:
|
| 1446 |
+
reference_output = batch["reference_output"]
|
| 1447 |
+
else:
|
| 1448 |
+
if self.ref_model is None:
|
| 1449 |
+
with self.null_ref_context():
|
| 1450 |
+
reference_output = self.model.generate(
|
| 1451 |
+
input_ids=batch["prompt_input_ids"],
|
| 1452 |
+
attention_mask=batch["prompt_attention_mask"],
|
| 1453 |
+
max_length=self.max_length,
|
| 1454 |
+
do_sample=True,
|
| 1455 |
+
pad_token_id=self.processing_class.pad_token_id,
|
| 1456 |
+
)
|
| 1457 |
+
else:
|
| 1458 |
+
reference_output = self.ref_model.generate(
|
| 1459 |
+
input_ids=batch["prompt_input_ids"],
|
| 1460 |
+
attention_mask=batch["prompt_attention_mask"],
|
| 1461 |
+
max_length=self.max_length,
|
| 1462 |
+
do_sample=True,
|
| 1463 |
+
pad_token_id=self.processing_class.pad_token_id,
|
| 1464 |
+
)
|
| 1465 |
+
|
| 1466 |
+
policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
|
| 1467 |
+
policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)
|
| 1468 |
+
|
| 1469 |
+
reference_output = pad_to_length(reference_output, self.max_length, self.processing_class.pad_token_id)
|
| 1470 |
+
reference_output_decoded = self.processing_class.batch_decode(reference_output, skip_special_tokens=True)
|
| 1471 |
+
|
| 1472 |
+
return policy_output_decoded, reference_output_decoded
|
| 1473 |
+
|
| 1474 |
+
    def prediction_step(
        self,
        model: Union[PreTrainedModel, nn.Module],
        inputs: dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[list[str]] = None,
    ):
        """Evaluation step: compute the batch loss/metrics and optionally per-split logits."""
        if ignore_keys is None:
            if hasattr(model, "config"):
                ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []

        # peft + bf16 silently upcasts some hidden states; evaluate under explicit autocast.
        prediction_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
        with torch.no_grad(), prediction_context_manager:
            loss, metrics = self.get_batch_loss_metrics(model, inputs)

        # force log the metrics
        if self.accelerator.is_main_process:
            self.store_metrics(metrics, train_eval="eval")

        if prediction_loss_only:
            return (loss.detach(), None, None)

        # logits for the chosen and rejected samples from model
        # NOTE(review): get_batch_loss_metrics stores "logits/chosen_sum" / "logits/rejected_sum",
        # not "logits/chosen" / "logits/rejected" — these lookups look like they would KeyError
        # when prediction_loss_only is False; confirm against upstream TRL before relying on them.
        logits_dict = {
            "eval_logits/chosen": metrics["logits/chosen"],
            "eval_logits/rejected": metrics["logits/rejected"],
        }
        logits = torch.tensor(
            [v for k, v in logits_dict.items() if k not in ignore_keys], device=self.accelerator.device
        )
        labels = torch.zeros(logits.shape[0], device=self.accelerator.device)

        return (loss.detach(), logits, labels)
|
| 1509 |
+
|
| 1510 |
+
    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[list[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Overriding built-in evaluation loop to store metrics for each batch.
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works both with or without labels.
        """

        # Sample and save to game log if requested (for one batch to save time)
        if self.generate_during_eval:
            # Generate random indices within the range of the total number of samples
            num_samples = len(dataloader.dataset)
            random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)

            # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
            random_batch_dataset = dataloader.dataset.select(random_indices)
            random_batch = self.data_collator(random_batch_dataset)
            random_batch = self._prepare_inputs(random_batch)

            # Only generate for the undesirable (label False) examples of the sampled batch.
            target_indicies = [i for i in range(len(random_batch["label"])) if random_batch["label"][i] is False]
            target_batch = {
                "prompt_input_ids": random_batch["prompt_input_ids"][target_indicies],
                "prompt_attention_mask": random_batch["prompt_attention_mask"][target_indicies],
                "prompt": itemgetter(*target_indicies)(random_batch["prompt"]),
            }
            policy_output_decoded, ref_output_decoded = self.generate_from_model_and_ref(self.model, target_batch)

            # Strip the prompt prefix so the table shows only the generated continuations.
            table = pd.DataFrame(
                columns=["Prompt", "Policy", "Ref Model"],
                data=[
                    [prompt, pol[len(prompt) :], ref[len(prompt) :]]
                    for prompt, pol, ref in zip(target_batch["prompt"], policy_output_decoded, ref_output_decoded)
                ],
            )
            if "wandb" in self.args.report_to:
                wandb.log({"game_log": wandb.Table(data=table)})

            if "comet_ml" in self.args.report_to:
                log_table_to_comet_experiment(
                    name="game_log.csv",
                    table=table,
                )

        # Base evaluation
        initial_output = super().evaluation_loop(
            dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
        )

        return initial_output
|
| 1566 |
+
|
| 1567 |
+
    def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
        """
        Log `logs` on the various objects watching training, including stored metrics.

        Args:
            logs (`dict[str, float]`):
                The values to log.
            start_time (`float` or `None`, *optional*, defaults to `None`):
                Start time of the training.
        """
        # logs either has 'loss' or 'eval_loss'
        train_eval = "train" if "loss" in logs else "eval"
        # train metrics should have no prefix, eval should have 'eval_'
        prefix = "eval_" if train_eval == "eval" else ""
        # accumulate average metrics from sums and lengths
        for split in ["chosen", "rejected"]:
            if f"count/{split}" in self._stored_metrics[train_eval]:
                count_sum = torch.Tensor(self._stored_metrics[train_eval][f"count/{split}"]).sum().item()
                for metric in ["rewards", "logps", "logits"]:
                    # Weighted average: sum of per-batch sums divided by total example count.
                    logs[f"{prefix}{metric}/{split}"] = (
                        torch.Tensor(self._stored_metrics[train_eval][f"{metric}/{split}_sum"]).sum().item()
                        / count_sum
                    )
                    # delete obsolete metric
                    del self._stored_metrics[train_eval][f"{metric}/{split}_sum"]
                del self._stored_metrics[train_eval][f"count/{split}"]
        # calculate reward margin
        if f"{prefix}rewards/chosen" in logs and f"{prefix}rewards/rejected" in logs:
            logs[f"{prefix}rewards/margins"] = logs[f"{prefix}rewards/chosen"] - logs[f"{prefix}rewards/rejected"]
        # Add averaged stored metrics to logs, then drop the buffer for this split.
        for key, metrics in self._stored_metrics[train_eval].items():
            logs[f"{prefix}{key}"] = torch.Tensor(metrics).mean().item()
        del self._stored_metrics[train_eval]

        # transformers 4.47 added the start_time argument to Trainer.log.
        if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
            return super().log(logs, start_time)
        else:  # transformers<=4.46
            return super().log(logs)
|
| 1605 |
+
|
| 1606 |
+
    def create_model_card(
        self,
        model_name: Optional[str] = None,
        dataset_name: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            model_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the model.
            dataset_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the dataset used for training.
            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
                Tags to be associated with the model card.
        """
        # Only the main process writes the card.
        if not self.is_world_process_zero():
            return

        # Treat _name_or_path as the base model only when it is a hub id, not a local path.
        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
            base_model = self.model.config._name_or_path
        else:
            base_model = None

        tags = tags or []
        if isinstance(tags, str):
            tags = [tags]

        if hasattr(self.model.config, "unsloth_version"):
            tags.append("unsloth")

        citation = textwrap.dedent("""\
        @article{ethayarajh2024kto,
            title = {{KTO: Model Alignment as Prospect Theoretic Optimization}},
            author = {Kawin Ethayarajh and Winnie Xu and Niklas Muennighoff and Dan Jurafsky and Douwe Kiela},
            year = 2024,
            eprint = {arXiv:2402.01306},
        }""")

        model_card = generate_model_card(
            base_model=base_model,
            model_name=model_name,
            hub_model_id=self.hub_model_id,
            dataset_name=dataset_name,
            tags=tags,
            wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
            comet_url=get_comet_experiment_url(),
            trainer_name="KTO",
            trainer_citation=citation,
            paper_title="KTO: Model Alignment as Prospect Theoretic Optimization",
            paper_id="2402.01306",
        )

        model_card.save(os.path.join(self.args.output_dir, "README.md"))
|
| 1661 |
+
class UnslothKTOTrainer(_UnslothKTOTrainer):
|
| 1662 |
+
"""
|
| 1663 |
+
|
| 1664 |
+
Initialize KTOTrainer.
|
| 1665 |
+
|
| 1666 |
+
Args:
|
| 1667 |
+
model (`transformers.PreTrainedModel`):
|
| 1668 |
+
The model to train, preferably an `AutoModelForSequenceClassification`.
|
| 1669 |
+
ref_model (`PreTrainedModelWrapper`):
|
| 1670 |
+
Hugging Face transformer model with a casual language modelling head. Used for implicit reward computation and loss. If no
|
| 1671 |
+
reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
|
| 1672 |
+
args (`KTOConfig`):
|
| 1673 |
+
The arguments to use for training.
|
| 1674 |
+
train_dataset (`datasets.Dataset`):
|
| 1675 |
+
The dataset to use for training.
|
| 1676 |
+
eval_dataset (`datasets.Dataset`):
|
| 1677 |
+
The dataset to use for evaluation.
|
| 1678 |
+
processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
|
| 1679 |
+
Processing class used to process the data. If provided, will be used to automatically process the inputs
|
| 1680 |
+
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
|
| 1681 |
+
reuse the fine-tuned model.
|
| 1682 |
+
data_collator (`transformers.DataCollator`, *optional*, defaults to `None`):
|
| 1683 |
+
The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
|
| 1684 |
+
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
|
| 1685 |
+
model_init (`Callable[[], transformers.PreTrainedModel]`):
|
| 1686 |
+
The model initializer to use for training. If None is specified, the default model initializer will be used.
|
| 1687 |
+
callbacks (`list[transformers.TrainerCallback]`):
|
| 1688 |
+
The callbacks to use for training.
|
| 1689 |
+
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
|
| 1690 |
+
The optimizer and scheduler to use for training.
|
| 1691 |
+
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
|
| 1692 |
+
The function to use to preprocess the logits before computing the metrics.
|
| 1693 |
+
peft_config (`dict`, defaults to `None`):
|
| 1694 |
+
The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
|
| 1695 |
+
compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
|
| 1696 |
+
The function to use to compute the metrics. Must take a `EvalPrediction` and return
|
| 1697 |
+
a dictionary string to metric values.
|
| 1698 |
+
model_adapter_name (`str`, defaults to `None`):
|
| 1699 |
+
Name of the train target PEFT adapter, when using LoRA with multiple adapters.
|
| 1700 |
+
ref_adapter_name (`str`, defaults to `None`):
|
| 1701 |
+
Name of the reference PEFT adapter, when using LoRA with multiple adapters.
|
| 1702 |
+
|
| 1703 |
+
"""
|
| 1704 |
+
def __init__(
|
| 1705 |
+
self,
|
| 1706 |
+
model = None,
|
| 1707 |
+
ref_model = None,
|
| 1708 |
+
args = None,
|
| 1709 |
+
train_dataset = None,
|
| 1710 |
+
eval_dataset = None,
|
| 1711 |
+
processing_class = None,
|
| 1712 |
+
data_collator = None,
|
| 1713 |
+
model_init = None,
|
| 1714 |
+
callbacks = None,
|
| 1715 |
+
preprocess_logits_for_metrics = None,
|
| 1716 |
+
peft_config = None,
|
| 1717 |
+
compute_metrics = None,
|
| 1718 |
+
model_adapter_name = None,
|
| 1719 |
+
ref_adapter_name = None,
|
| 1720 |
+
**kwargs
|
| 1721 |
+
):
|
| 1722 |
+
if args is None: args = UnslothKTOConfig()
|
| 1723 |
+
use_bf16 = getattr(args, 'bf16', False)
|
| 1724 |
+
use_fp16 = getattr(args, 'fp16', False)
|
| 1725 |
+
force_float32 = False
|
| 1726 |
+
if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
|
| 1727 |
+
print('Unsloth: Switching to float32 training since model cannot work with float16')
|
| 1728 |
+
force_float32 = True
|
| 1729 |
+
mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
|
| 1730 |
+
dtype = getattr(model.config, 'torch_dtype', None)
|
| 1731 |
+
if dtype is None: dtype = model.get_input_embeddings().dtype
|
| 1732 |
+
from unsloth_zoo.utils import _get_dtype
|
| 1733 |
+
dtype = _get_dtype(dtype)
|
| 1734 |
+
float16 = dtype == torch.float16
|
| 1735 |
+
if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
|
| 1736 |
+
if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
|
| 1737 |
+
if force_float32:
|
| 1738 |
+
args.fp16 = False
|
| 1739 |
+
args.bf16 = False
|
| 1740 |
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
|
| 1741 |
+
elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
|
| 1742 |
+
args.fp16 = float16
|
| 1743 |
+
args.bf16 = not float16
|
| 1744 |
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
|
| 1745 |
+
if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
|
| 1746 |
+
args.eval_strategy = 'steps'
|
| 1747 |
+
if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
|
| 1748 |
+
ga_steps = getattr(args, 'gradient_accumulation_steps', None)
|
| 1749 |
+
if ga_steps is not None and ga_steps > 1:
|
| 1750 |
+
from transformers import __version__ as transformers_version
|
| 1751 |
+
if Version(transformers_version) <= Version('4.45.2'):
|
| 1752 |
+
print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
|
| 1753 |
+
'`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
|
| 1754 |
+
if getattr(args, 'eval_strategy', 'no') != 'no':
|
| 1755 |
+
eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
|
| 1756 |
+
if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
|
| 1757 |
+
if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
|
| 1758 |
+
fp16_full_eval = getattr(args, 'fp16_full_eval', False)
|
| 1759 |
+
bf16_full_eval = getattr(args, 'bf16_full_eval', False)
|
| 1760 |
+
if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
|
| 1761 |
+
if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
|
| 1762 |
+
if force_float32:
|
| 1763 |
+
args.bf16_full_eval = False
|
| 1764 |
+
args.fp16_full_eval = False
|
| 1765 |
+
elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
|
| 1766 |
+
args.bf16_full_eval = True
|
| 1767 |
+
args.fp16_full_eval = False
|
| 1768 |
+
elif not bf16_full_eval and not fp16_full_eval:
|
| 1769 |
+
args.bf16_full_eval = args.bf16
|
| 1770 |
+
args.fp16_full_eval = args.fp16
|
| 1771 |
+
_output_logits = False
|
| 1772 |
+
if locals().get('compute_metrics', None) is not None: _output_logits = True
|
| 1773 |
+
if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
|
| 1774 |
+
if _output_logits:
|
| 1775 |
+
os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
|
| 1776 |
+
if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
|
| 1777 |
+
pass
|
| 1778 |
+
else:
|
| 1779 |
+
model_max_seq_length = getattr(model, 'max_seq_length', None)
|
| 1780 |
+
args_max_seq_length = getattr(args, 'max_seq_length', None)
|
| 1781 |
+
if args_max_seq_length is None and model_max_seq_length is not None:
|
| 1782 |
+
max_seq_length = model.max_seq_length
|
| 1783 |
+
if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
|
| 1784 |
+
if model is not None and hasattr(model, 'for_training'):
|
| 1785 |
+
model.for_training()
|
| 1786 |
+
if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
|
| 1787 |
+
if 'processing_class' in locals():
|
| 1788 |
+
if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
|
| 1789 |
+
if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
|
| 1790 |
+
__tokenizer = processing_class if 'processing_class' in locals() else tokenizer
|
| 1791 |
+
from unsloth_zoo.vision_utils import UnslothVisionDataCollator
|
| 1792 |
+
if not isinstance(data_collator, UnslothVisionDataCollator):
|
| 1793 |
+
if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
|
| 1794 |
+
data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
|
| 1795 |
+
elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
|
| 1796 |
+
data_collator = DataCollatorForSeq2Seq(__tokenizer)
|
| 1797 |
+
else:
|
| 1798 |
+
if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
|
| 1799 |
+
if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
|
| 1800 |
+
if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
|
| 1801 |
+
if not isinstance(data_collator, UnslothVisionDataCollator):
|
| 1802 |
+
if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
|
| 1803 |
+
if isinstance(data_collator, DataCollatorForSeq2Seq):
|
| 1804 |
+
data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
|
| 1805 |
+
else:
|
| 1806 |
+
data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
|
| 1807 |
+
other_metrics = []
|
| 1808 |
+
|
| 1809 |
+
from unsloth_zoo.logging_utils import PatchRLStatistics
|
| 1810 |
+
PatchRLStatistics('kto_trainer', other_metrics)
|
| 1811 |
+
|
| 1812 |
+
super().__init__(
|
| 1813 |
+
model = model,
|
| 1814 |
+
ref_model = ref_model,
|
| 1815 |
+
args = args,
|
| 1816 |
+
train_dataset = train_dataset,
|
| 1817 |
+
eval_dataset = eval_dataset,
|
| 1818 |
+
processing_class = processing_class,
|
| 1819 |
+
data_collator = data_collator,
|
| 1820 |
+
model_init = model_init,
|
| 1821 |
+
callbacks = callbacks,
|
| 1822 |
+
preprocess_logits_for_metrics = preprocess_logits_for_metrics,
|
| 1823 |
+
peft_config = peft_config,
|
| 1824 |
+
compute_metrics = compute_metrics,
|
| 1825 |
+
model_adapter_name = model_adapter_name,
|
| 1826 |
+
ref_adapter_name = ref_adapter_name,**kwargs)
|
| 1827 |
+
if hasattr(self, 'neftune_hook_handle'):
|
| 1828 |
+
self.neftune_hook_handle.remove()
|
| 1829 |
+
if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
|
| 1830 |
+
if getattr(args, 'neftune_noise_alpha', None) is not None:
|
| 1831 |
+
model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
|
| 1832 |
+
pass
|
| 1833 |
+
|
| 1834 |
+
pass
|
unsloth_compiled_cache/UnslothNashMDTrainer.py
ADDED
|
@@ -0,0 +1,949 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.nash_md_trainer import (Any, BaseImageProcessor, BasePairwiseJudge, Callable, Dataset, EvalPrediction, F, FeatureExtractionMixin, GeometricMixtureWrapper, IterableDataset, NashMDConfig, NashMDTrainer, OnlineDPOTrainer, OptimizerNames, Optional, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, SIMPLE_CHAT_TEMPLATE, TrainerCallback, Union, empty_cache, generate_model_card, get_comet_experiment_url, get_reward, is_conversational, is_wandb_available, jinja2, maybe_apply_chat_template, nn, os, textwrap, torch, truncate_right, unwrap_model_for_generation)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
| 25 |
+
torch_compile_options = {
|
| 26 |
+
"epilogue_fusion" : True,
|
| 27 |
+
"max_autotune" : False,
|
| 28 |
+
"shape_padding" : True,
|
| 29 |
+
"trace.enabled" : False,
|
| 30 |
+
"triton.cudagraphs" : False,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
|
| 34 |
+
def selective_log_softmax(logits, index):
|
| 35 |
+
logits = logits.to(torch.float32)
|
| 36 |
+
selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
|
| 37 |
+
# loop to reduce peak mem consumption
|
| 38 |
+
# logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
|
| 39 |
+
logsumexp_values = torch.logsumexp(logits, dim = -1)
|
| 40 |
+
per_token_logps = selected_logits - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
|
| 41 |
+
return per_token_logps
|
| 42 |
+
@dataclass
|
| 43 |
+
class UnslothNashMDConfig(NashMDConfig):
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
Configuration class for the [`NashMDTrainer`].
|
| 47 |
+
|
| 48 |
+
Subclass of [`OnlineDPOConfig`] we can use all its arguments and add the following:
|
| 49 |
+
|
| 50 |
+
Parameters:
|
| 51 |
+
mixture_coef (`float` or `list[float]`, *optional*, defaults to `0.5`):
|
| 52 |
+
Logit mixture coefficient for the model and reference model. If a list of floats is provided then the
|
| 53 |
+
mixture coefficient is selected for each new epoch and the last coefficient is used for the rest of the
|
| 54 |
+
epochs.
|
| 55 |
+
|
| 56 |
+
"""
|
| 57 |
+
vllm_sampling_params: Optional[Any] = field(
|
| 58 |
+
default = None,
|
| 59 |
+
metadata = {'help': 'vLLM SamplingParams'},
|
| 60 |
+
)
|
| 61 |
+
unsloth_num_chunks : Optional[int] = field(
|
| 62 |
+
default = -1,
|
| 63 |
+
metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
|
| 64 |
+
)
|
| 65 |
+
def __init__(
|
| 66 |
+
self,
|
| 67 |
+
output_dir = None,
|
| 68 |
+
overwrite_output_dir = None,
|
| 69 |
+
do_train = False,
|
| 70 |
+
do_eval = False,
|
| 71 |
+
do_predict = False,
|
| 72 |
+
eval_strategy = 'no',
|
| 73 |
+
prediction_loss_only = False,
|
| 74 |
+
per_device_train_batch_size = 4,
|
| 75 |
+
per_device_eval_batch_size = 4,
|
| 76 |
+
per_gpu_train_batch_size = None,
|
| 77 |
+
per_gpu_eval_batch_size = None,
|
| 78 |
+
gradient_accumulation_steps = 2,
|
| 79 |
+
eval_accumulation_steps = 2,
|
| 80 |
+
eval_delay = 0,
|
| 81 |
+
torch_empty_cache_steps = 250,
|
| 82 |
+
learning_rate = 5e-05,
|
| 83 |
+
weight_decay = 0.01,
|
| 84 |
+
adam_beta1 = 0.9,
|
| 85 |
+
adam_beta2 = 0.999,
|
| 86 |
+
adam_epsilon = 1e-08,
|
| 87 |
+
max_grad_norm = 1.0,
|
| 88 |
+
num_train_epochs = 3.0,
|
| 89 |
+
max_steps = -1,
|
| 90 |
+
lr_scheduler_type = 'linear',
|
| 91 |
+
warmup_ratio = 0.1,
|
| 92 |
+
warmup_steps = 0,
|
| 93 |
+
log_level = 'passive',
|
| 94 |
+
log_level_replica = 'warning',
|
| 95 |
+
log_on_each_node = True,
|
| 96 |
+
logging_dir = None,
|
| 97 |
+
logging_strategy = 'steps',
|
| 98 |
+
logging_first_step = False,
|
| 99 |
+
logging_steps = 1,
|
| 100 |
+
logging_nan_inf_filter = False,
|
| 101 |
+
save_strategy = 'steps',
|
| 102 |
+
save_steps = 500,
|
| 103 |
+
save_total_limit = None,
|
| 104 |
+
save_safetensors = True,
|
| 105 |
+
save_on_each_node = False,
|
| 106 |
+
save_only_model = False,
|
| 107 |
+
restore_callback_states_from_checkpoint = False,
|
| 108 |
+
no_cuda = False,
|
| 109 |
+
use_cpu = False,
|
| 110 |
+
use_mps_device = False,
|
| 111 |
+
seed = 3407,
|
| 112 |
+
data_seed = 3407,
|
| 113 |
+
jit_mode_eval = False,
|
| 114 |
+
use_ipex = False,
|
| 115 |
+
bf16 = False,
|
| 116 |
+
fp16 = False,
|
| 117 |
+
fp16_opt_level = 'O1',
|
| 118 |
+
half_precision_backend = 'auto',
|
| 119 |
+
bf16_full_eval = False,
|
| 120 |
+
fp16_full_eval = False,
|
| 121 |
+
tf32 = None,
|
| 122 |
+
local_rank = -1,
|
| 123 |
+
ddp_backend = None,
|
| 124 |
+
tpu_num_cores = None,
|
| 125 |
+
tpu_metrics_debug = False,
|
| 126 |
+
debug = '',
|
| 127 |
+
dataloader_drop_last = False,
|
| 128 |
+
eval_steps = None,
|
| 129 |
+
dataloader_num_workers = 0,
|
| 130 |
+
dataloader_prefetch_factor = None,
|
| 131 |
+
past_index = -1,
|
| 132 |
+
run_name = None,
|
| 133 |
+
disable_tqdm = None,
|
| 134 |
+
remove_unused_columns = True,
|
| 135 |
+
label_names = None,
|
| 136 |
+
load_best_model_at_end = False,
|
| 137 |
+
metric_for_best_model = None,
|
| 138 |
+
greater_is_better = None,
|
| 139 |
+
ignore_data_skip = False,
|
| 140 |
+
fsdp = '',
|
| 141 |
+
fsdp_min_num_params = 0,
|
| 142 |
+
fsdp_config = None,
|
| 143 |
+
tp_size = 0,
|
| 144 |
+
fsdp_transformer_layer_cls_to_wrap = None,
|
| 145 |
+
accelerator_config = None,
|
| 146 |
+
deepspeed = None,
|
| 147 |
+
label_smoothing_factor = 0.0,
|
| 148 |
+
optim = 'adamw_8bit',
|
| 149 |
+
optim_args = None,
|
| 150 |
+
adafactor = False,
|
| 151 |
+
group_by_length = False,
|
| 152 |
+
length_column_name = 'length',
|
| 153 |
+
report_to = None,
|
| 154 |
+
ddp_find_unused_parameters = None,
|
| 155 |
+
ddp_bucket_cap_mb = None,
|
| 156 |
+
ddp_broadcast_buffers = None,
|
| 157 |
+
dataloader_pin_memory = True,
|
| 158 |
+
dataloader_persistent_workers = False,
|
| 159 |
+
skip_memory_metrics = True,
|
| 160 |
+
use_legacy_prediction_loop = False,
|
| 161 |
+
push_to_hub = False,
|
| 162 |
+
resume_from_checkpoint = None,
|
| 163 |
+
hub_model_id = None,
|
| 164 |
+
hub_strategy = 'every_save',
|
| 165 |
+
hub_token = None,
|
| 166 |
+
hub_private_repo = None,
|
| 167 |
+
hub_always_push = False,
|
| 168 |
+
gradient_checkpointing = False,
|
| 169 |
+
gradient_checkpointing_kwargs = None,
|
| 170 |
+
include_inputs_for_metrics = False,
|
| 171 |
+
eval_do_concat_batches = True,
|
| 172 |
+
fp16_backend = 'auto',
|
| 173 |
+
push_to_hub_model_id = None,
|
| 174 |
+
push_to_hub_organization = None,
|
| 175 |
+
push_to_hub_token = None,
|
| 176 |
+
mp_parameters = '',
|
| 177 |
+
auto_find_batch_size = False,
|
| 178 |
+
full_determinism = False,
|
| 179 |
+
torchdynamo = None,
|
| 180 |
+
ray_scope = 'last',
|
| 181 |
+
ddp_timeout = 1800,
|
| 182 |
+
torch_compile = False,
|
| 183 |
+
torch_compile_backend = None,
|
| 184 |
+
torch_compile_mode = None,
|
| 185 |
+
include_tokens_per_second = False,
|
| 186 |
+
include_num_input_tokens_seen = False,
|
| 187 |
+
neftune_noise_alpha = None,
|
| 188 |
+
optim_target_modules = None,
|
| 189 |
+
batch_eval_metrics = False,
|
| 190 |
+
eval_on_start = False,
|
| 191 |
+
use_liger_kernel = False,
|
| 192 |
+
eval_use_gather_object = False,
|
| 193 |
+
average_tokens_across_devices = False,
|
| 194 |
+
reward_model_path = None,
|
| 195 |
+
judge = None,
|
| 196 |
+
max_new_tokens = 64,
|
| 197 |
+
max_length = 512,
|
| 198 |
+
temperature = 0.9,
|
| 199 |
+
missing_eos_penalty = None,
|
| 200 |
+
loss_type = 'sigmoid',
|
| 201 |
+
dataset_num_proc = None,
|
| 202 |
+
disable_dropout = True,
|
| 203 |
+
use_vllm = False,
|
| 204 |
+
ds3_gather_for_generation = True,
|
| 205 |
+
vllm_sampling_params = None,
|
| 206 |
+
unsloth_num_chunks = -1,
|
| 207 |
+
**kwargs,
|
| 208 |
+
):
|
| 209 |
+
if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
|
| 210 |
+
if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
|
| 211 |
+
if output_dir is None and save_strategy == 'steps' and save_steps == 500:
|
| 212 |
+
output_dir = 'unsloth_training_checkpoints'
|
| 213 |
+
save_strategy = 'no'
|
| 214 |
+
if dataset_num_proc is None:
|
| 215 |
+
from multiprocessing import cpu_count
|
| 216 |
+
dataset_num_proc = cpu_count()
|
| 217 |
+
|
| 218 |
+
super().__init__(
|
| 219 |
+
output_dir = output_dir,
|
| 220 |
+
overwrite_output_dir = overwrite_output_dir,
|
| 221 |
+
do_train = do_train,
|
| 222 |
+
do_eval = do_eval,
|
| 223 |
+
do_predict = do_predict,
|
| 224 |
+
eval_strategy = eval_strategy,
|
| 225 |
+
prediction_loss_only = prediction_loss_only,
|
| 226 |
+
per_device_train_batch_size = per_device_train_batch_size,
|
| 227 |
+
per_device_eval_batch_size = per_device_eval_batch_size,
|
| 228 |
+
per_gpu_train_batch_size = per_gpu_train_batch_size,
|
| 229 |
+
per_gpu_eval_batch_size = per_gpu_eval_batch_size,
|
| 230 |
+
gradient_accumulation_steps = gradient_accumulation_steps,
|
| 231 |
+
eval_accumulation_steps = eval_accumulation_steps,
|
| 232 |
+
eval_delay = eval_delay,
|
| 233 |
+
torch_empty_cache_steps = torch_empty_cache_steps,
|
| 234 |
+
learning_rate = learning_rate,
|
| 235 |
+
weight_decay = weight_decay,
|
| 236 |
+
adam_beta1 = adam_beta1,
|
| 237 |
+
adam_beta2 = adam_beta2,
|
| 238 |
+
adam_epsilon = adam_epsilon,
|
| 239 |
+
max_grad_norm = max_grad_norm,
|
| 240 |
+
num_train_epochs = num_train_epochs,
|
| 241 |
+
max_steps = max_steps,
|
| 242 |
+
lr_scheduler_type = lr_scheduler_type,
|
| 243 |
+
warmup_ratio = warmup_ratio,
|
| 244 |
+
warmup_steps = warmup_steps,
|
| 245 |
+
log_level = log_level,
|
| 246 |
+
log_level_replica = log_level_replica,
|
| 247 |
+
log_on_each_node = log_on_each_node,
|
| 248 |
+
logging_dir = logging_dir,
|
| 249 |
+
logging_strategy = logging_strategy,
|
| 250 |
+
logging_first_step = logging_first_step,
|
| 251 |
+
logging_steps = logging_steps,
|
| 252 |
+
logging_nan_inf_filter = logging_nan_inf_filter,
|
| 253 |
+
save_strategy = save_strategy,
|
| 254 |
+
save_steps = save_steps,
|
| 255 |
+
save_total_limit = save_total_limit,
|
| 256 |
+
save_safetensors = save_safetensors,
|
| 257 |
+
save_on_each_node = save_on_each_node,
|
| 258 |
+
save_only_model = save_only_model,
|
| 259 |
+
restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
|
| 260 |
+
no_cuda = no_cuda,
|
| 261 |
+
use_cpu = use_cpu,
|
| 262 |
+
use_mps_device = use_mps_device,
|
| 263 |
+
seed = seed,
|
| 264 |
+
data_seed = data_seed,
|
| 265 |
+
jit_mode_eval = jit_mode_eval,
|
| 266 |
+
use_ipex = use_ipex,
|
| 267 |
+
bf16 = bf16,
|
| 268 |
+
fp16 = fp16,
|
| 269 |
+
fp16_opt_level = fp16_opt_level,
|
| 270 |
+
half_precision_backend = half_precision_backend,
|
| 271 |
+
bf16_full_eval = bf16_full_eval,
|
| 272 |
+
fp16_full_eval = fp16_full_eval,
|
| 273 |
+
tf32 = tf32,
|
| 274 |
+
local_rank = local_rank,
|
| 275 |
+
ddp_backend = ddp_backend,
|
| 276 |
+
tpu_num_cores = tpu_num_cores,
|
| 277 |
+
tpu_metrics_debug = tpu_metrics_debug,
|
| 278 |
+
debug = debug,
|
| 279 |
+
dataloader_drop_last = dataloader_drop_last,
|
| 280 |
+
eval_steps = eval_steps,
|
| 281 |
+
dataloader_num_workers = dataloader_num_workers,
|
| 282 |
+
dataloader_prefetch_factor = dataloader_prefetch_factor,
|
| 283 |
+
past_index = past_index,
|
| 284 |
+
run_name = run_name,
|
| 285 |
+
disable_tqdm = disable_tqdm,
|
| 286 |
+
remove_unused_columns = remove_unused_columns,
|
| 287 |
+
label_names = label_names,
|
| 288 |
+
load_best_model_at_end = load_best_model_at_end,
|
| 289 |
+
metric_for_best_model = metric_for_best_model,
|
| 290 |
+
greater_is_better = greater_is_better,
|
| 291 |
+
ignore_data_skip = ignore_data_skip,
|
| 292 |
+
fsdp = fsdp,
|
| 293 |
+
fsdp_min_num_params = fsdp_min_num_params,
|
| 294 |
+
fsdp_config = fsdp_config,
|
| 295 |
+
tp_size = tp_size,
|
| 296 |
+
fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
|
| 297 |
+
accelerator_config = accelerator_config,
|
| 298 |
+
deepspeed = deepspeed,
|
| 299 |
+
label_smoothing_factor = label_smoothing_factor,
|
| 300 |
+
optim = optim,
|
| 301 |
+
optim_args = optim_args,
|
| 302 |
+
adafactor = adafactor,
|
| 303 |
+
group_by_length = group_by_length,
|
| 304 |
+
length_column_name = length_column_name,
|
| 305 |
+
report_to = report_to,
|
| 306 |
+
ddp_find_unused_parameters = ddp_find_unused_parameters,
|
| 307 |
+
ddp_bucket_cap_mb = ddp_bucket_cap_mb,
|
| 308 |
+
ddp_broadcast_buffers = ddp_broadcast_buffers,
|
| 309 |
+
dataloader_pin_memory = dataloader_pin_memory,
|
| 310 |
+
dataloader_persistent_workers = dataloader_persistent_workers,
|
| 311 |
+
skip_memory_metrics = skip_memory_metrics,
|
| 312 |
+
use_legacy_prediction_loop = use_legacy_prediction_loop,
|
| 313 |
+
push_to_hub = push_to_hub,
|
| 314 |
+
resume_from_checkpoint = resume_from_checkpoint,
|
| 315 |
+
hub_model_id = hub_model_id,
|
| 316 |
+
hub_strategy = hub_strategy,
|
| 317 |
+
hub_token = hub_token,
|
| 318 |
+
hub_private_repo = hub_private_repo,
|
| 319 |
+
hub_always_push = hub_always_push,
|
| 320 |
+
gradient_checkpointing = gradient_checkpointing,
|
| 321 |
+
gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
|
| 322 |
+
include_inputs_for_metrics = include_inputs_for_metrics,
|
| 323 |
+
eval_do_concat_batches = eval_do_concat_batches,
|
| 324 |
+
fp16_backend = fp16_backend,
|
| 325 |
+
push_to_hub_model_id = push_to_hub_model_id,
|
| 326 |
+
push_to_hub_organization = push_to_hub_organization,
|
| 327 |
+
push_to_hub_token = push_to_hub_token,
|
| 328 |
+
mp_parameters = mp_parameters,
|
| 329 |
+
auto_find_batch_size = auto_find_batch_size,
|
| 330 |
+
full_determinism = full_determinism,
|
| 331 |
+
torchdynamo = torchdynamo,
|
| 332 |
+
ray_scope = ray_scope,
|
| 333 |
+
ddp_timeout = ddp_timeout,
|
| 334 |
+
torch_compile = torch_compile,
|
| 335 |
+
torch_compile_backend = torch_compile_backend,
|
| 336 |
+
torch_compile_mode = torch_compile_mode,
|
| 337 |
+
include_tokens_per_second = include_tokens_per_second,
|
| 338 |
+
include_num_input_tokens_seen = include_num_input_tokens_seen,
|
| 339 |
+
neftune_noise_alpha = neftune_noise_alpha,
|
| 340 |
+
optim_target_modules = optim_target_modules,
|
| 341 |
+
batch_eval_metrics = batch_eval_metrics,
|
| 342 |
+
eval_on_start = eval_on_start,
|
| 343 |
+
use_liger_kernel = use_liger_kernel,
|
| 344 |
+
eval_use_gather_object = eval_use_gather_object,
|
| 345 |
+
average_tokens_across_devices = average_tokens_across_devices,
|
| 346 |
+
reward_model_path = reward_model_path,
|
| 347 |
+
judge = judge,
|
| 348 |
+
max_new_tokens = max_new_tokens,
|
| 349 |
+
max_length = max_length,
|
| 350 |
+
temperature = temperature,
|
| 351 |
+
missing_eos_penalty = missing_eos_penalty,
|
| 352 |
+
loss_type = loss_type,
|
| 353 |
+
dataset_num_proc = dataset_num_proc,
|
| 354 |
+
disable_dropout = disable_dropout,
|
| 355 |
+
use_vllm = use_vllm,
|
| 356 |
+
ds3_gather_for_generation = ds3_gather_for_generation,**kwargs)
|
| 357 |
+
self.vllm_sampling_params = vllm_sampling_params
|
| 358 |
+
self.unsloth_num_chunks = unsloth_num_chunks
|
| 359 |
+
pass
|
| 360 |
+
|
| 361 |
+
class _UnslothNashMDTrainer(OnlineDPOTrainer):
|
| 362 |
+
r""""""
|
| 363 |
+
|
| 364 |
+
_tag_names = ["trl", "nash-md"]
|
| 365 |
+
|
| 366 |
+
def __init__(
|
| 367 |
+
self,
|
| 368 |
+
model: Union[PreTrainedModel, nn.Module] = None,
|
| 369 |
+
ref_model: Union[PreTrainedModel, nn.Module] = None,
|
| 370 |
+
reward_model: Union[PreTrainedModel, nn.Module, None] = None,
|
| 371 |
+
judge: Optional[BasePairwiseJudge] = None,
|
| 372 |
+
args: Optional[NashMDConfig] = None,
|
| 373 |
+
data_collator: Optional[Callable] = None,
|
| 374 |
+
train_dataset: Optional[Union[Dataset, IterableDataset]] = None,
|
| 375 |
+
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
|
| 376 |
+
processing_class: Optional[
|
| 377 |
+
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
|
| 378 |
+
] = None,
|
| 379 |
+
peft_config: Optional[dict] = None,
|
| 380 |
+
compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
|
| 381 |
+
callbacks: Optional[list[TrainerCallback]] = None,
|
| 382 |
+
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
|
| 383 |
+
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
|
| 384 |
+
) -> None:
|
| 385 |
+
super().__init__(
|
| 386 |
+
model=model,
|
| 387 |
+
ref_model=ref_model,
|
| 388 |
+
reward_model=reward_model,
|
| 389 |
+
judge=judge,
|
| 390 |
+
args=args,
|
| 391 |
+
data_collator=data_collator,
|
| 392 |
+
train_dataset=train_dataset,
|
| 393 |
+
eval_dataset=eval_dataset,
|
| 394 |
+
processing_class=processing_class,
|
| 395 |
+
reward_processing_class=processing_class, # for now, NashMDTrainer can't use any reward model
|
| 396 |
+
peft_config=peft_config,
|
| 397 |
+
compute_metrics=compute_metrics,
|
| 398 |
+
callbacks=callbacks,
|
| 399 |
+
optimizers=optimizers,
|
| 400 |
+
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
|
| 401 |
+
)
|
| 402 |
+
|
| 403 |
+
self._mixture_coef = self.args.mixture_coef
|
| 404 |
+
|
| 405 |
+
# Overwrite the stats dictionary to include NashMD specific statistics
|
| 406 |
+
self.stats = {
|
| 407 |
+
# Remove "non_score_reward", "rlhf_reward", "scores_margin"
|
| 408 |
+
# Add "mixture_coef"
|
| 409 |
+
"loss/kl": [],
|
| 410 |
+
"objective/entropy": [],
|
| 411 |
+
"loss/score": [],
|
| 412 |
+
"rewards/probabilities": [],
|
| 413 |
+
"rewards/accuracies": [],
|
| 414 |
+
"rewards/margins": [],
|
| 415 |
+
"logps/chosen": [],
|
| 416 |
+
"logps/rejected": [],
|
| 417 |
+
"val/model_contain_eos_token": [],
|
| 418 |
+
"val/ref_contain_eos_token": [],
|
| 419 |
+
"beta": [],
|
| 420 |
+
"mixture_coef": [],
|
| 421 |
+
}
|
| 422 |
+
if self.reward_model is not None:
|
| 423 |
+
self.stats["rewards/chosen"] = []
|
| 424 |
+
self.stats["rewards/rejected"] = []
|
| 425 |
+
|
| 426 |
+
@property
|
| 427 |
+
def mixture_coef(self):
|
| 428 |
+
if isinstance(self._mixture_coef, list):
|
| 429 |
+
epoch = self.state.epoch
|
| 430 |
+
return self._mixture_coef[epoch] if epoch < len(self._mixture_coef) else self._mixture_coef[-1]
|
| 431 |
+
else:
|
| 432 |
+
return self._mixture_coef
|
| 433 |
+
|
| 434 |
+
    def _generate_completions(self, model, prompts):
        """Sample one completion per prompt from the policy and one from the
        geometric mixture of the policy and the reference model.

        Args:
            model: the policy model being trained.
            prompts: dict with `input_ids` and `attention_mask` tensors.

        Returns:
            `(model_output, mixture_output)` — full sequences (prompt +
            completion) as returned by `generate`.
        """
        # Generate from the (unwrapped-for-generation) policy model.
        with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model:
            model_output = unwrapped_model.generate(
                input_ids=prompts["input_ids"],
                attention_mask=prompts["attention_mask"],
                generation_config=self.generation_config,
            )

            # With no explicit reference model (e.g. PEFT setups), the policy
            # itself serves as the reference inside the mixture wrapper.
            ref_model = model if self.ref_model is None else self.ref_model
            # NOTE: kept nested inside the first context because the mixture
            # wrapper needs `unwrapped_model` to still be live.
            with torch.no_grad(), unwrap_model_for_generation(ref_model, self.accelerator) as unwrapped_ref_model:
                mixture_model = GeometricMixtureWrapper(
                    model=unwrapped_model,
                    ref_model=unwrapped_ref_model,
                    generation_config=self.generation_config,
                    mixture_coef=self.mixture_coef,
                    device=self.accelerator.device,
                )

                mixture_output = mixture_model.generate(
                    input_ids=prompts["input_ids"],
                    attention_mask=prompts["attention_mask"],
                    generation_config=self.generation_config,
                )

        return model_output, mixture_output
|
| 459 |
+
|
| 460 |
+
def _process_completions(self, model_output, mixture_output, prompts):
|
| 461 |
+
context_length = prompts["input_ids"].shape[1]
|
| 462 |
+
|
| 463 |
+
# Process model completions
|
| 464 |
+
model_completion_ids = model_output[:, context_length:]
|
| 465 |
+
model_completion_ids, model_completion_mask = truncate_right(
|
| 466 |
+
model_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id
|
| 467 |
+
)
|
| 468 |
+
model_data = {
|
| 469 |
+
"input_ids": torch.cat((prompts["input_ids"], model_completion_ids), dim=1),
|
| 470 |
+
"attention_mask": torch.cat((prompts["attention_mask"], model_completion_mask), dim=1),
|
| 471 |
+
"raw": prompts["raw"],
|
| 472 |
+
}
|
| 473 |
+
|
| 474 |
+
# Process reference model completions
|
| 475 |
+
mixture_completion_ids = mixture_output[:, context_length:]
|
| 476 |
+
mixture_completion_ids, mixture_completion_mask = truncate_right(
|
| 477 |
+
mixture_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id
|
| 478 |
+
)
|
| 479 |
+
mixture_data = {
|
| 480 |
+
"input_ids": torch.cat((prompts["input_ids"], mixture_completion_ids), dim=1),
|
| 481 |
+
"attention_mask": torch.cat((prompts["attention_mask"], mixture_completion_mask), dim=1),
|
| 482 |
+
"raw": prompts["raw"],
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
return model_data, mixture_data
|
| 486 |
+
|
| 487 |
+
def _compute_rewards(self, model_data, mixture_data, context_length):
|
| 488 |
+
with torch.no_grad():
|
| 489 |
+
_, model_scores, _ = get_reward(
|
| 490 |
+
self.reward_model, model_data["input_ids"], self.processing_class.pad_token_id, context_length
|
| 491 |
+
)
|
| 492 |
+
_, mixture_scores, _ = get_reward(
|
| 493 |
+
self.reward_model, mixture_data["input_ids"], self.processing_class.pad_token_id, context_length
|
| 494 |
+
)
|
| 495 |
+
|
| 496 |
+
# Apply EOS penalty if needed
|
| 497 |
+
if self.args.missing_eos_penalty is not None:
|
| 498 |
+
model_contain_eos = torch.any(model_data["input_ids"] == self.processing_class.eos_token_id, dim=-1)
|
| 499 |
+
mixture_contain_eos = torch.any(mixture_data["input_ids"] == self.processing_class.eos_token_id, dim=-1)
|
| 500 |
+
model_scores[~model_contain_eos] -= self.args.missing_eos_penalty
|
| 501 |
+
mixture_scores[~mixture_contain_eos] -= self.args.missing_eos_penalty
|
| 502 |
+
|
| 503 |
+
return model_scores, mixture_scores
|
| 504 |
+
|
| 505 |
+
    def _compute_judge(self, model_data, mixture_data, context_length):
        """Use the pairwise judge to estimate P(policy completion preferred
        over mixture completion) for each prompt.

        Returns:
            A tensor of preference probabilities on the same device as the
            input ids.
        """
        prompts = model_data["raw"]
        # Decode only the completion part (tokens after the prompt context).
        model_data_completions = self.processing_class.batch_decode(
            model_data["input_ids"][:, context_length:], skip_special_tokens=True
        )
        model_data_completions = [completion.strip() for completion in model_data_completions]

        mixture_data_completions = self.processing_class.batch_decode(
            mixture_data["input_ids"][:, context_length:], skip_special_tokens=True
        )
        mixture_data_completions = [completion.strip() for completion in mixture_data_completions]
        # For conversational data, wrap completions as assistant turns and
        # flatten prompts/completions to plain text with the simple template
        # so the judge always receives strings.
        if is_conversational({"prompt": prompts[0]}):
            model_data_completions = [
                [{"role": "assistant", "content": completion}] for completion in model_data_completions
            ]
            environment = jinja2.Environment()
            template = environment.from_string(SIMPLE_CHAT_TEMPLATE)
            prompts = [template.render(messages=message) for message in prompts]
            model_data_completions = [template.render(messages=completion) for completion in model_data_completions]

            mixture_data_completions = [
                [{"role": "assistant", "content": completion}] for completion in mixture_data_completions
            ]
            mixture_data_completions = [
                template.render(messages=completion) for completion in mixture_data_completions
            ]

        # Judge returns a score per (policy, mixture) pair; order matters —
        # the policy completion is first.
        probability = self.judge.judge(
            prompts,
            list(zip(model_data_completions, mixture_data_completions)),
            return_scores=True,
        )
        return torch.tensor(probability, device=model_data["input_ids"].device)
|
| 538 |
+
|
| 539 |
+
    def _compute_logprobs(self, model, model_data, context_length):
        """Per-token log-probs of the policy completions under both the
        policy and the reference model.

        Returns:
            `(model_logprobs_model_data, ref_logprobs_model_data)` — shape
            (batch, completion_len); padding positions are zeroed.
        """
        def compute_logprobs_for_data(m, data):
            output = m(data["input_ids"], attention_mask=data["attention_mask"])
            # Logits at position t predict token t+1, hence the -1 shift;
            # keep only positions that predict completion tokens.
            logits = output.logits[:, context_length - 1 : -1]
            token_logprobs = selective_log_softmax(logits, data["input_ids"][:, context_length:])
            return token_logprobs

        # Compute logprobs for model completions under the model
        model_logprobs_model_data = compute_logprobs_for_data(model, model_data)

        # Compute logprobs of model completions under the reference model.
        # With no separate ref model (PEFT), disabling the adapter recovers
        # the base model as the reference.
        with torch.no_grad():
            if self.ref_model is None:
                with model.disable_adapter():
                    ref_logprobs_model_data = compute_logprobs_for_data(model, model_data)
            else:
                ref_logprobs_model_data = compute_logprobs_for_data(self.ref_model, model_data)

        # Mask padding tokens so sums over the sequence ignore them.
        model_padding_mask = model_data["attention_mask"][:, context_length:] == 0
        model_logprobs_model_data = model_logprobs_model_data.masked_fill(model_padding_mask, 0.0)
        ref_logprobs_model_data = ref_logprobs_model_data.masked_fill(model_padding_mask, 0.0)

        return (model_logprobs_model_data, ref_logprobs_model_data)
|
| 563 |
+
|
| 564 |
+
    def _compute_losses(
        self,
        model_logprobs_model_data,
        ref_logprobs_model_data,
        probability,
    ):
        """Nash-MD loss: REINFORCE on the judge/reward preference signal
        plus a beta-weighted KL regularizer toward the reference model.

        Returns:
            `(loss.mean(), score, kl_div_log)` — scalar loss plus the
            per-sample score and KL estimates (for logging).
        """
        # reinforce score where 0.5 is a control variate
        score = (probability - 0.5) * model_logprobs_model_data.sum(1)

        # kl divergence via reinforce; log_ratio is produced under no_grad so
        # it acts as a detached weight in the KL surrogate below.
        with torch.no_grad():
            log_ratio = model_logprobs_model_data - ref_logprobs_model_data
            kl_div_log = log_ratio.sum(1)
        # Gradient flows only through model_logprobs_model_data here.
        kl_div_loss = (log_ratio * model_logprobs_model_data).sum(1)

        # final loss
        loss = self.beta * kl_div_loss - score

        return loss.mean(), score, kl_div_log
|
| 583 |
+
|
| 584 |
+
    def _log_statistics(
        self,
        model_data,
        mixture_data,
        model_logprobs_model_data,
        ref_logprobs_model_data,
        probability,
        score,
        kl_div,
        context_length,
        model_scores=None,
        mixture_scores=None,
    ):
        """Accumulate per-step Nash-MD metrics into `self.stats`.

        All tensors are gathered across processes and reduced to a mean
        before being appended; reward stats are only recorded when a reward
        model is in use.
        """
        # Helper function to gather and compute mean
        def gather_mean(tensor):
            return self.accelerator.gather_for_metrics(tensor).mean().item()

        # Log score
        self.stats["loss/score"].append(gather_mean(score))
        # Log KL divergence
        self.stats["loss/kl"].append(gather_mean(kl_div))

        # Log logprobs (summed over the completion)
        model_logprobs_model_data_sum = model_logprobs_model_data.sum(1)
        ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1)

        self.stats["logps/chosen"].append(gather_mean(model_logprobs_model_data_sum))
        self.stats["logps/rejected"].append(gather_mean(ref_logprobs_model_data_sum))

        # Log rewards (only meaningful when scoring with a reward model)
        if self.reward_model is not None:
            self.stats["rewards/chosen"].append(gather_mean(model_scores))
            self.stats["rewards/rejected"].append(gather_mean(mixture_scores))

        # Log preference probabilities (policy vs. mixture)
        self.stats["rewards/probabilities"].append(gather_mean(probability))

        # Calculate entropy for model data (negative total logprob)
        entropy_model_data = -model_logprobs_model_data.sum(1)
        self.stats["objective/entropy"].append(gather_mean(entropy_model_data))

        # Calculate margins (policy vs. reference total logprob)
        margin = model_logprobs_model_data_sum - ref_logprobs_model_data_sum
        self.stats["rewards/margins"].append(gather_mean(margin))

        # Calculate accuracy (fraction of positive margins)
        accuracy = (margin > 0).float()
        self.stats["rewards/accuracies"].append(gather_mean(accuracy))

        # Log EOS token statistics (fraction of completions containing EOS)
        model_eos = (model_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1)
        mixture_eos = (mixture_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1)
        self.stats["val/model_contain_eos_token"].append(gather_mean(model_eos.float()))
        self.stats["val/ref_contain_eos_token"].append(gather_mean(mixture_eos.float()))

        # Log beta and mixture coef (may follow per-epoch schedules)
        self.stats["beta"].append(self.beta)
        self.stats["mixture_coef"].append(self.mixture_coef)
|
| 642 |
+
|
| 643 |
+
    def training_step(
        self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None
    ) -> torch.Tensor:
        """One full Nash-MD online step: tokenize prompts, sample policy and
        mixture completions, score them (reward model or judge), compute the
        loss, log metrics, and backpropagate.

        Returns the detached loss scaled by gradient_accumulation_steps, as
        the HF Trainer loop expects.
        """
        model.train()

        # Apply chat template and tokenize the input
        batch_size = len(next(iter(inputs.values())))
        prompts = inputs["prompt"]
        inputs = [{k: v[i] for k, v in inputs.items()} for i in range(batch_size)]
        inputs = [maybe_apply_chat_template(x, self.processing_class) for x in inputs]
        inputs = [self.tokenize_row(x, self.model.config.is_encoder_decoder, self.processing_class) for x in inputs]
        inputs = self.data_collator(inputs)

        # need the prompt_ only
        inputs = self._prepare_inputs(inputs)
        context_length = inputs["prompt_input_ids"].shape[1]
        prompts = {
            "input_ids": inputs["prompt_input_ids"],
            "attention_mask": inputs["prompt_attention_mask"],
            "raw": prompts,  # keep raw prompts for the judge path
        }
        del inputs

        # Sample completions from both the model and the reference model
        model_output, mixture_output = self._generate_completions(model, prompts)

        # Process model completions
        model_data, mixture_data = self._process_completions(model_output, mixture_output, prompts)

        # Compute rewards
        if self.reward_model is not None:
            model_scores, mixture_scores = self._compute_rewards(model_data, mixture_data, context_length)
            # probability of the model data vs the mixture data
            probability = F.sigmoid(model_scores - mixture_scores)
        else:
            model_scores, mixture_scores = None, None
            probability = self._compute_judge(model_data, mixture_data, context_length)

        # Compute logprobs
        model_logprobs_model_data, ref_logprobs_model_data = self._compute_logprobs(model, model_data, context_length)

        # Compute loss
        loss, score, kl_div = self._compute_losses(model_logprobs_model_data, ref_logprobs_model_data, probability)

        # Log everything (detached copies so logging keeps no graph alive)
        self._log_statistics(
            model_data,
            mixture_data,
            model_logprobs_model_data.detach(),
            ref_logprobs_model_data,
            probability,
            score.detach(),
            kl_div.detach(),
            context_length,
            model_scores,
            mixture_scores,
        )

        # Periodically free cached GPU memory if configured.
        if (
            self.args.torch_empty_cache_steps is not None
            and self.state.global_step % self.args.torch_empty_cache_steps == 0
        ):
            empty_cache()

        kwargs = {}
        # For LOMO optimizers you need to explicitly use the learning rate
        if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
            kwargs["learning_rate"] = self._get_learning_rate()

        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training

        if self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            self.accelerator.backward(loss, **kwargs)

        return loss.detach() / self.args.gradient_accumulation_steps
|
| 722 |
+
|
| 723 |
+
    def create_model_card(
        self,
        model_name: Optional[str] = None,
        dataset_name: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            model_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the model.
            dataset_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the dataset used for training.
            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
                Tags to be associated with the model card.
        """
        # Only the main process writes the card.
        if not self.is_world_process_zero():
            return

        # A local directory path is not a hub model id; only record hub bases.
        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
            base_model = self.model.config._name_or_path
        else:
            base_model = None

        tags = tags or []
        if isinstance(tags, str):
            tags = [tags]

        if hasattr(self.model.config, "unsloth_version"):
            tags.append("unsloth")

        citation = textwrap.dedent("""\
        @inproceedings{munos2024nash,
            title        = {{Nash Learning from Human Feedback}},
            author       = {R{\'{e}}mi Munos and Michal Valko and Daniele Calandriello and Mohammad Gheshlaghi Azar and Mark Rowland and Zhaohan Daniel Guo and Yunhao Tang and Matthieu Geist and Thomas Mesnard and C{\\^{o}}me Fiegel and Andrea Michi and Marco Selvi and Sertan Girgin and Nikola Momchev and Olivier Bachem and Daniel J. Mankowitz and Doina Precup and Bilal Piot},
            year         = 2024,
            booktitle    = {Forty-first International Conference on Machine Learning, {ICML} 2024, Vienna, Austria, July 21-27, 2024},
            publisher    = {OpenReview.net},
            url          = {https://openreview.net/forum?id=Y5AmNYiyCQ}
        }""")

        model_card = generate_model_card(
            base_model=base_model,
            model_name=model_name,
            hub_model_id=self.hub_model_id,
            dataset_name=dataset_name,
            tags=tags,
            wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
            comet_url=get_comet_experiment_url(),
            trainer_name="Nash-MD",
            trainer_citation=citation,
            paper_title="Nash Learning from Human Feedback",
            paper_id="2312.00886",
        )

        model_card.save(os.path.join(self.args.output_dir, "README.md"))
|
| 780 |
+
class UnslothNashMDTrainer(_UnslothNashMDTrainer):
    """

    Initialize NashMDTrainer as a subclass of [`OnlineDPOConfig`].

    Args:
        model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForCausalLM`.
        ref_model (`PreTrainedModelWrapper`):
            Hugging Face transformer model with a casual language modelling head. Used for implicit reward computation and loss. If no
            reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
        reward_model (`transformers.PreTrainedModel`):
            The reward model to score completions with, preferably an `AutoModelForSequenceClassification`.
        judge (`BasePairwiseJudge`):
            The judge to use for pairwise comparison of model completions.
        args (`NashMDConfig`):
            The NashMD config arguments to use for training.
        data_collator (`transformers.DataCollator`):
            The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
            which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
        train_dataset (`datasets.Dataset`):
            The dataset to use for training.
        eval_dataset (`datasets.Dataset`):
            The dataset to use for evaluation.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
        peft_config (`dict`):
            The peft config to use for training.
        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function to use to compute the metrics. Must take a `EvalPrediction` and return
            a dictionary string to metric values.
        callbacks (`list[transformers.TrainerCallback]`):
            The callbacks to use for training.
        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
            The optimizer and scheduler to use for training.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
            The function to use to preprocess the logits before computing the metrics.

    """
    def __init__(
        self,
        model = None,
        ref_model = None,
        reward_model = None,
        judge = None,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        peft_config = None,
        compute_metrics = None,
        callbacks = None,
        preprocess_logits_for_metrics = None,
        **kwargs
    ):
        # NOTE: this __init__ is machine-generated by Unsloth; it normalizes
        # mixed-precision / eval settings on `args` before delegating to the
        # patched NashMDTrainer above.
        if args is None: args = UnslothNashMDConfig()
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        force_float32 = False
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        # Resolve the model's actual compute dtype to validate fp16/bf16 flags.
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            # Neither flag set: pick the one matching the model's dtype.
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        # Keep full-eval precision consistent with the training precision.
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            # Propagate the model's max_seq_length onto args when args lacks one.
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        # Right padding is required for training; apply to whichever
        # tokenizer/processor objects are in scope.
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
        from unsloth_zoo.vision_utils import UnslothVisionDataCollator
        # Swap the collator if it does not match the dataset's label layout.
        if not isinstance(data_collator, UnslothVisionDataCollator):
            if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
                data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
            elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
                data_collator = DataCollatorForSeq2Seq(__tokenizer)
        else:
            if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
            if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
            if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Processor wrappers expose the real tokenizer under `.tokenizer`.
            if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
                if isinstance(data_collator, DataCollatorForSeq2Seq):
                    data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
                else:
                    data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
        other_metrics = []

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('nash_md_trainer', other_metrics)

        super().__init__(
            model = model,
            ref_model = ref_model,
            reward_model = reward_model,
            judge = judge,
            args = args,
            data_collator = data_collator,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            peft_config = peft_config,
            compute_metrics = compute_metrics,
            callbacks = callbacks,
            preprocess_logits_for_metrics = preprocess_logits_for_metrics,**kwargs)
        # Re-install NEFTune via the embedding attribute instead of the hook.
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass

pass
|
unsloth_compiled_cache/UnslothORPOTrainer.py
ADDED
|
@@ -0,0 +1,1537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.orpo_trainer import (Any, AutoModelForCausalLM, BaseImageProcessor, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, Literal, ORPOConfig, ORPOTrainer, Optional, PartialState, PeftModel, PreTrainedModel, PreTrainedModelWrapper, PreTrainedTokenizerBase, ProcessorMixin, Trainer, TrainerCallback, Union, add_bos_token_if_needed, add_eos_token_if_needed, amp, deepcopy, defaultdict, disable_dropout_in_model, generate_model_card, get_comet_experiment_url, inspect, is_comet_available, is_peft_available, is_torch_fx_proxy, is_torch_xla_available, is_wandb_available, log_table_to_comet_experiment, maybe_apply_chat_template, maybe_extract_prompt, nn, np, nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_model_for_kbit_training, random, textwrap, torch, transformers, version, warnings)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
| 25 |
+
# Shared options for every `torch.compile` call in this file.
# NOTE(review): these look like TorchInductor tuning flags (fusion on,
# autotune/tracing/cudagraphs off) — confirm against the installed torch version.
torch_compile_options = {
    "epilogue_fusion" : True,
    "max_autotune" : False,
    "shape_padding" : True,
    "trace.enabled" : False,
    "triton.cudagraphs" : False,
}
|
| 32 |
+
|
| 33 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
def selective_log_softmax(logits, index):
    """Return the log-softmax probability of each token in `index`.

    Equivalent to ``logits.log_softmax(-1).gather(-1, index.unsqueeze(-1)).squeeze(-1)``
    but avoids materializing the full log-softmax tensor: it uses the identity
    log_softmax(x_i) = x_i - logsumexp(x).

    Args:
        logits: unnormalized scores with the vocabulary on the last axis.
        index: token ids to select, shaped like ``logits`` minus the last axis.

    Returns:
        float32 tensor of per-token log-probabilities, same shape as ``index``.
    """
    # Upcast before logsumexp for numerical stability.
    logits = logits.to(torch.float32)
    # Normalizer log(sum(exp(x))) over the vocabulary axis.
    normalizer = torch.logsumexp(logits, dim=-1)
    # Logit of each selected token.
    picked = torch.gather(logits, dim=-1, index=index.unsqueeze(-1)).squeeze(-1)
    return picked - normalizer
|
| 42 |
+
@dataclass
class UnslothORPOConfig(ORPOConfig):
    """

    Configuration class for the [`ORPOTrainer`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        learning_rate (`float`, *optional*, defaults to `1e-6`):
            Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
            [`~transformers.TrainingArguments`].
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the relative ratio loss weight in the ORPO loss. In the [paper](https://huggingface.co/papers/2403.07691),
            it is denoted by λ. In the [code](https://github.com/xfactlab/orpo), it is denoted by `alpha`.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from the model to W&B or Comet during evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.

    """
    # Unsloth-only fields, not present on the upstream trl ORPOConfig.
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    def __init__(
        self,
        output_dir = None,
        overwrite_output_dir = None,
        do_train = False,
        do_eval = False,
        do_predict = False,
        eval_strategy = 'no',
        prediction_loss_only = False,
        per_device_train_batch_size = 4,
        per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None,
        per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2,
        eval_accumulation_steps = 2,
        eval_delay = 0,
        torch_empty_cache_steps = 250,
        learning_rate = 5e-05,
        weight_decay = 0.01,
        adam_beta1 = 0.9,
        adam_beta2 = 0.999,
        adam_epsilon = 1e-08,
        max_grad_norm = 1.0,
        num_train_epochs = 3.0,
        max_steps = -1,
        lr_scheduler_type = 'linear',
        warmup_ratio = 0.1,
        warmup_steps = 0,
        log_level = 'passive',
        log_level_replica = 'warning',
        log_on_each_node = True,
        logging_dir = None,
        logging_strategy = 'steps',
        logging_first_step = False,
        logging_steps = 1,
        logging_nan_inf_filter = False,
        save_strategy = 'steps',
        save_steps = 500,
        save_total_limit = None,
        save_safetensors = True,
        save_on_each_node = False,
        save_only_model = False,
        restore_callback_states_from_checkpoint = False,
        no_cuda = False,
        use_cpu = False,
        use_mps_device = False,
        seed = 3407,
        data_seed = 3407,
        jit_mode_eval = False,
        use_ipex = False,
        bf16 = False,
        fp16 = False,
        fp16_opt_level = 'O1',
        half_precision_backend = 'auto',
        bf16_full_eval = False,
        fp16_full_eval = False,
        tf32 = None,
        local_rank = -1,
        ddp_backend = None,
        tpu_num_cores = None,
        tpu_metrics_debug = False,
        debug = '',
        dataloader_drop_last = False,
        eval_steps = None,
        dataloader_num_workers = 0,
        dataloader_prefetch_factor = None,
        past_index = -1,
        run_name = None,
        disable_tqdm = None,
        remove_unused_columns = True,
        label_names = None,
        load_best_model_at_end = False,
        metric_for_best_model = None,
        greater_is_better = None,
        ignore_data_skip = False,
        fsdp = '',
        fsdp_min_num_params = 0,
        fsdp_config = None,
        tp_size = 0,
        fsdp_transformer_layer_cls_to_wrap = None,
        accelerator_config = None,
        deepspeed = None,
        label_smoothing_factor = 0.0,
        optim = 'adamw_8bit',
        optim_args = None,
        adafactor = False,
        group_by_length = False,
        length_column_name = 'length',
        report_to = None,
        ddp_find_unused_parameters = None,
        ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None,
        dataloader_pin_memory = True,
        dataloader_persistent_workers = False,
        skip_memory_metrics = True,
        use_legacy_prediction_loop = False,
        push_to_hub = False,
        resume_from_checkpoint = None,
        hub_model_id = None,
        hub_strategy = 'every_save',
        hub_token = None,
        hub_private_repo = None,
        hub_always_push = False,
        gradient_checkpointing = False,
        gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False,
        eval_do_concat_batches = True,
        fp16_backend = 'auto',
        push_to_hub_model_id = None,
        push_to_hub_organization = None,
        push_to_hub_token = None,
        mp_parameters = '',
        auto_find_batch_size = False,
        full_determinism = False,
        torchdynamo = None,
        ray_scope = 'last',
        ddp_timeout = 1800,
        torch_compile = False,
        torch_compile_backend = None,
        torch_compile_mode = None,
        include_tokens_per_second = False,
        include_num_input_tokens_seen = False,
        neftune_noise_alpha = None,
        optim_target_modules = None,
        batch_eval_metrics = False,
        eval_on_start = False,
        use_liger_kernel = False,
        eval_use_gather_object = False,
        average_tokens_across_devices = False,
        max_length = 1024,
        max_prompt_length = 512,
        max_completion_length = None,
        beta = 0.1,
        disable_dropout = True,
        label_pad_token_id = -100,
        padding_value = None,
        truncation_mode = 'keep_end',
        generate_during_eval = False,
        is_encoder_decoder = None,
        model_init_kwargs = None,
        dataset_num_proc = None,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        **kwargs,
    ):
        """Validate a few Unsloth-specific defaults, then forward every
        argument unchanged to the upstream `ORPOConfig.__init__`."""
        # Reject learning rates that would make gradient updates effectively
        # zero or make them explode.
        if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        # If the caller left output_dir unset AND kept the stock save settings,
        # point checkpoints at a scratch directory and disable saving entirely.
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        # Default dataset preprocessing to one worker per CPU core.
        if dataset_num_proc is None:
            from multiprocessing import cpu_count
            dataset_num_proc = cpu_count()

        # Pass everything through to trl's ORPOConfig (which extends
        # transformers.TrainingArguments); the Unsloth-only fields are kept out
        # of this call and stored on the instance afterwards.
        super().__init__(
            output_dir = output_dir,
            overwrite_output_dir = overwrite_output_dir,
            do_train = do_train,
            do_eval = do_eval,
            do_predict = do_predict,
            eval_strategy = eval_strategy,
            prediction_loss_only = prediction_loss_only,
            per_device_train_batch_size = per_device_train_batch_size,
            per_device_eval_batch_size = per_device_eval_batch_size,
            per_gpu_train_batch_size = per_gpu_train_batch_size,
            per_gpu_eval_batch_size = per_gpu_eval_batch_size,
            gradient_accumulation_steps = gradient_accumulation_steps,
            eval_accumulation_steps = eval_accumulation_steps,
            eval_delay = eval_delay,
            torch_empty_cache_steps = torch_empty_cache_steps,
            learning_rate = learning_rate,
            weight_decay = weight_decay,
            adam_beta1 = adam_beta1,
            adam_beta2 = adam_beta2,
            adam_epsilon = adam_epsilon,
            max_grad_norm = max_grad_norm,
            num_train_epochs = num_train_epochs,
            max_steps = max_steps,
            lr_scheduler_type = lr_scheduler_type,
            warmup_ratio = warmup_ratio,
            warmup_steps = warmup_steps,
            log_level = log_level,
            log_level_replica = log_level_replica,
            log_on_each_node = log_on_each_node,
            logging_dir = logging_dir,
            logging_strategy = logging_strategy,
            logging_first_step = logging_first_step,
            logging_steps = logging_steps,
            logging_nan_inf_filter = logging_nan_inf_filter,
            save_strategy = save_strategy,
            save_steps = save_steps,
            save_total_limit = save_total_limit,
            save_safetensors = save_safetensors,
            save_on_each_node = save_on_each_node,
            save_only_model = save_only_model,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            no_cuda = no_cuda,
            use_cpu = use_cpu,
            use_mps_device = use_mps_device,
            seed = seed,
            data_seed = data_seed,
            jit_mode_eval = jit_mode_eval,
            use_ipex = use_ipex,
            bf16 = bf16,
            fp16 = fp16,
            fp16_opt_level = fp16_opt_level,
            half_precision_backend = half_precision_backend,
            bf16_full_eval = bf16_full_eval,
            fp16_full_eval = fp16_full_eval,
            tf32 = tf32,
            local_rank = local_rank,
            ddp_backend = ddp_backend,
            tpu_num_cores = tpu_num_cores,
            tpu_metrics_debug = tpu_metrics_debug,
            debug = debug,
            dataloader_drop_last = dataloader_drop_last,
            eval_steps = eval_steps,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            past_index = past_index,
            run_name = run_name,
            disable_tqdm = disable_tqdm,
            remove_unused_columns = remove_unused_columns,
            label_names = label_names,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model,
            greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            fsdp = fsdp,
            fsdp_min_num_params = fsdp_min_num_params,
            fsdp_config = fsdp_config,
            tp_size = tp_size,
            fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
            accelerator_config = accelerator_config,
            deepspeed = deepspeed,
            label_smoothing_factor = label_smoothing_factor,
            optim = optim,
            optim_args = optim_args,
            adafactor = adafactor,
            group_by_length = group_by_length,
            length_column_name = length_column_name,
            report_to = report_to,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb,
            ddp_broadcast_buffers = ddp_broadcast_buffers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            skip_memory_metrics = skip_memory_metrics,
            use_legacy_prediction_loop = use_legacy_prediction_loop,
            push_to_hub = push_to_hub,
            resume_from_checkpoint = resume_from_checkpoint,
            hub_model_id = hub_model_id,
            hub_strategy = hub_strategy,
            hub_token = hub_token,
            hub_private_repo = hub_private_repo,
            hub_always_push = hub_always_push,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            include_inputs_for_metrics = include_inputs_for_metrics,
            eval_do_concat_batches = eval_do_concat_batches,
            fp16_backend = fp16_backend,
            push_to_hub_model_id = push_to_hub_model_id,
            push_to_hub_organization = push_to_hub_organization,
            push_to_hub_token = push_to_hub_token,
            mp_parameters = mp_parameters,
            auto_find_batch_size = auto_find_batch_size,
            full_determinism = full_determinism,
            torchdynamo = torchdynamo,
            ray_scope = ray_scope,
            ddp_timeout = ddp_timeout,
            torch_compile = torch_compile,
            torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            include_tokens_per_second = include_tokens_per_second,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            neftune_noise_alpha = neftune_noise_alpha,
            optim_target_modules = optim_target_modules,
            batch_eval_metrics = batch_eval_metrics,
            eval_on_start = eval_on_start,
            use_liger_kernel = use_liger_kernel,
            eval_use_gather_object = eval_use_gather_object,
            average_tokens_across_devices = average_tokens_across_devices,
            max_length = max_length,
            max_prompt_length = max_prompt_length,
            max_completion_length = max_completion_length,
            beta = beta,
            disable_dropout = disable_dropout,
            label_pad_token_id = label_pad_token_id,
            padding_value = padding_value,
            truncation_mode = truncation_mode,
            generate_during_eval = generate_during_eval,
            is_encoder_decoder = is_encoder_decoder,
            model_init_kwargs = model_init_kwargs,
            dataset_num_proc = dataset_num_proc,**kwargs)
        # Stash the Unsloth-only settings on the instance.
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
pass
| 393 |
+
|
| 394 |
+
class _UnslothORPOTrainer(Trainer):
|
| 395 |
+
r""""""
|
| 396 |
+
|
| 397 |
+
_tag_names = ["trl", "orpo"]
|
| 398 |
+
|
| 399 |
+
def __init__(
|
| 400 |
+
self,
|
| 401 |
+
model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
|
| 402 |
+
args: Optional[ORPOConfig] = None,
|
| 403 |
+
data_collator: Optional[DataCollator] = None,
|
| 404 |
+
train_dataset: Optional[Dataset] = None,
|
| 405 |
+
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
|
| 406 |
+
processing_class: Optional[
|
| 407 |
+
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
|
| 408 |
+
] = None,
|
| 409 |
+
model_init: Optional[Callable[[], PreTrainedModel]] = None,
|
| 410 |
+
callbacks: Optional[list[TrainerCallback]] = None,
|
| 411 |
+
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
|
| 412 |
+
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
|
| 413 |
+
peft_config: Optional[dict] = None,
|
| 414 |
+
compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
|
| 415 |
+
):
|
| 416 |
+
if args.model_init_kwargs is None:
|
| 417 |
+
model_init_kwargs = {}
|
| 418 |
+
elif not isinstance(model, str):
|
| 419 |
+
raise ValueError("You passed model_kwargs to the ORPOTrainer. But your model is already instantiated.")
|
| 420 |
+
else:
|
| 421 |
+
model_init_kwargs = args.model_init_kwargs
|
| 422 |
+
torch_dtype = model_init_kwargs.get("torch_dtype")
|
| 423 |
+
if torch_dtype is not None:
|
| 424 |
+
# Convert to `torch.dtype` if an str is passed
|
| 425 |
+
if isinstance(torch_dtype, str) and torch_dtype != "auto":
|
| 426 |
+
torch_dtype = getattr(torch, torch_dtype)
|
| 427 |
+
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
|
| 428 |
+
raise ValueError(
|
| 429 |
+
f"Invalid `torch_dtype` passed to the ORPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
|
| 430 |
+
)
|
| 431 |
+
model_init_kwargs["torch_dtype"] = torch_dtype
|
| 432 |
+
|
| 433 |
+
if isinstance(model, str):
|
| 434 |
+
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
|
| 435 |
+
|
| 436 |
+
# Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
|
| 437 |
+
# has been called in order to properly call autocast if needed.
|
| 438 |
+
self._peft_has_been_casted_to_bf16 = False
|
| 439 |
+
|
| 440 |
+
if not is_peft_available() and peft_config is not None:
|
| 441 |
+
raise ValueError(
|
| 442 |
+
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
|
| 443 |
+
)
|
| 444 |
+
elif is_peft_available() and peft_config is not None:
|
| 445 |
+
# if model is a peft model and we have a peft_config, we merge and unload it first
|
| 446 |
+
if isinstance(model, PeftModel):
|
| 447 |
+
model = model.merge_and_unload()
|
| 448 |
+
|
| 449 |
+
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
|
| 450 |
+
_support_gc_kwargs = hasattr(
|
| 451 |
+
args, "gradient_checkpointing_kwargs"
|
| 452 |
+
) and "gradient_checkpointing_kwargs" in list(
|
| 453 |
+
inspect.signature(prepare_model_for_kbit_training).parameters
|
| 454 |
+
)
|
| 455 |
+
|
| 456 |
+
prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
|
| 457 |
+
|
| 458 |
+
if _support_gc_kwargs:
|
| 459 |
+
prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
|
| 460 |
+
|
| 461 |
+
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
|
| 462 |
+
elif getattr(args, "gradient_checkpointing", False):
|
| 463 |
+
# For backward compatibility with older versions of transformers
|
| 464 |
+
if hasattr(model, "enable_input_require_grads"):
|
| 465 |
+
model.enable_input_require_grads()
|
| 466 |
+
else:
|
| 467 |
+
|
| 468 |
+
def make_inputs_require_grad(module, input, output):
|
| 469 |
+
output.requires_grad_(True)
|
| 470 |
+
|
| 471 |
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
| 472 |
+
|
| 473 |
+
# get peft model with the given config
|
| 474 |
+
model = model
|
| 475 |
+
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
|
| 476 |
+
peft_module_casting_to_bf16(model)
|
| 477 |
+
# If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
|
| 478 |
+
self._peft_has_been_casted_to_bf16 = True
|
| 479 |
+
|
| 480 |
+
# For models that use gradient_checkpointing, we need to attach a hook that enables input
|
| 481 |
+
# to explicitly have `requires_grad=True`, otherwise training will either silently
|
| 482 |
+
# fail or completely fail.
|
| 483 |
+
elif getattr(args, "gradient_checkpointing", False):
|
| 484 |
+
# For backward compatibility with older versions of transformers
|
| 485 |
+
if hasattr(model, "enable_input_require_grads"):
|
| 486 |
+
model.enable_input_require_grads()
|
| 487 |
+
else:
|
| 488 |
+
|
| 489 |
+
def make_inputs_require_grad(module, input, output):
|
| 490 |
+
output.requires_grad_(True)
|
| 491 |
+
|
| 492 |
+
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
|
| 493 |
+
|
| 494 |
+
if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
|
| 495 |
+
raise ValueError(
|
| 496 |
+
"`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
|
| 497 |
+
" Please install `wandb` or `comet-ml` to resolve."
|
| 498 |
+
)
|
| 499 |
+
|
| 500 |
+
if model is not None:
|
| 501 |
+
self.is_encoder_decoder = model.config.is_encoder_decoder
|
| 502 |
+
elif args.is_encoder_decoder is None:
|
| 503 |
+
raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
|
| 504 |
+
else:
|
| 505 |
+
self.is_encoder_decoder = args.is_encoder_decoder
|
| 506 |
+
|
| 507 |
+
if self.is_encoder_decoder:
|
| 508 |
+
self.decoder_start_token_id = model.config.decoder_start_token_id
|
| 509 |
+
self.pad_token_id = model.config.pad_token_id
|
| 510 |
+
|
| 511 |
+
if processing_class is None:
|
| 512 |
+
raise ValueError("processing_class must be specified to tokenize a ORPO dataset.")
|
| 513 |
+
if args.max_length is None:
|
| 514 |
+
warnings.warn(
|
| 515 |
+
"`max_length` is not set in the ORPOConfig's init"
|
| 516 |
+
" it will default to `512` by default, but you should do it yourself in the future.",
|
| 517 |
+
UserWarning,
|
| 518 |
+
)
|
| 519 |
+
max_length = 512
|
| 520 |
+
else:
|
| 521 |
+
max_length = args.max_length
|
| 522 |
+
if args.max_prompt_length is None:
|
| 523 |
+
warnings.warn(
|
| 524 |
+
"`max_prompt_length` is not set in the ORPOConfig's init"
|
| 525 |
+
" it will default to `128` by default, but you should do it yourself in the future.",
|
| 526 |
+
UserWarning,
|
| 527 |
+
)
|
| 528 |
+
max_prompt_length = 128
|
| 529 |
+
else:
|
| 530 |
+
max_prompt_length = args.max_prompt_length
|
| 531 |
+
|
| 532 |
+
if args.max_completion_length is None and self.is_encoder_decoder:
|
| 533 |
+
warnings.warn(
|
| 534 |
+
"When using an encoder decoder architecture, you should set `max_completion_length` in the ORPOConfig's init"
|
| 535 |
+
" it will default to `128` by default, but you should do it yourself in the future.",
|
| 536 |
+
UserWarning,
|
| 537 |
+
)
|
| 538 |
+
self.max_completion_length = 128
|
| 539 |
+
else:
|
| 540 |
+
self.max_completion_length = args.max_completion_length
|
| 541 |
+
|
| 542 |
+
if data_collator is None:
|
| 543 |
+
data_collator = DPODataCollatorWithPadding(
|
| 544 |
+
pad_token_id=processing_class.pad_token_id,
|
| 545 |
+
label_pad_token_id=args.label_pad_token_id,
|
| 546 |
+
is_encoder_decoder=self.is_encoder_decoder,
|
| 547 |
+
)
|
| 548 |
+
|
| 549 |
+
if args.remove_unused_columns:
|
| 550 |
+
args.remove_unused_columns = False
|
| 551 |
+
# warn users
|
| 552 |
+
warnings.warn(
|
| 553 |
+
"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments"
|
| 554 |
+
" we have set it for you, but you should do it yourself in the future.",
|
| 555 |
+
UserWarning,
|
| 556 |
+
)
|
| 557 |
+
|
| 558 |
+
self.use_dpo_data_collator = True
|
| 559 |
+
else:
|
| 560 |
+
self.use_dpo_data_collator = False
|
| 561 |
+
|
| 562 |
+
# Disable dropout in the model and reference model
|
| 563 |
+
if args.disable_dropout:
|
| 564 |
+
disable_dropout_in_model(model)
|
| 565 |
+
|
| 566 |
+
self.max_length = max_length
|
| 567 |
+
self.generate_during_eval = args.generate_during_eval
|
| 568 |
+
self.label_pad_token_id = args.label_pad_token_id
|
| 569 |
+
self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
|
| 570 |
+
self.max_prompt_length = max_prompt_length
|
| 571 |
+
self.truncation_mode = args.truncation_mode
|
| 572 |
+
self.processing_class = processing_class
|
| 573 |
+
|
| 574 |
+
self.beta = args.beta
|
| 575 |
+
self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
|
| 576 |
+
self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
|
| 577 |
+
if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
|
| 578 |
+
warnings.warn(
|
| 579 |
+
"You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
|
| 580 |
+
"`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
|
| 581 |
+
"greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
|
| 582 |
+
"loss.",
|
| 583 |
+
UserWarning,
|
| 584 |
+
)
|
| 585 |
+
|
| 586 |
+
self._stored_metrics = defaultdict(lambda: defaultdict(list))
|
| 587 |
+
|
| 588 |
+
# The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
|
| 589 |
+
# input tensor associated with the key "input_ids". However, in ORPO, the sampled data does not include the
|
| 590 |
+
# "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and
|
| 591 |
+
# "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens
|
| 592 |
+
# of the input, floating-point operations will not be computed." To suppress this warning, we set the
|
| 593 |
+
# "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
|
| 594 |
+
# that the warning has already been issued.
|
| 595 |
+
model.warnings_issued["estimate_tokens"] = True
|
| 596 |
+
|
| 597 |
+
# Compute that only on the main process for faster data processing.
|
| 598 |
+
# see: https://github.com/huggingface/trl/pull/1255
|
| 599 |
+
with PartialState().local_main_process_first():
|
| 600 |
+
# Extract the prompt if needed, and apply the chat template if needed
|
| 601 |
+
train_dataset = train_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
|
| 602 |
+
train_dataset = train_dataset.map(
|
| 603 |
+
maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc
|
| 604 |
+
)
|
| 605 |
+
train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
|
| 606 |
+
if eval_dataset is not None:
|
| 607 |
+
eval_dataset = eval_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
|
| 608 |
+
eval_dataset = eval_dataset.map(
|
| 609 |
+
maybe_apply_chat_template,
|
| 610 |
+
fn_kwargs={"tokenizer": processing_class},
|
| 611 |
+
num_proc=args.dataset_num_proc,
|
| 612 |
+
)
|
| 613 |
+
eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
|
| 614 |
+
|
| 615 |
+
super().__init__(
|
| 616 |
+
model=model,
|
| 617 |
+
args=args,
|
| 618 |
+
data_collator=data_collator,
|
| 619 |
+
train_dataset=train_dataset,
|
| 620 |
+
eval_dataset=eval_dataset,
|
| 621 |
+
processing_class=processing_class,
|
| 622 |
+
model_init=model_init,
|
| 623 |
+
compute_metrics=compute_metrics,
|
| 624 |
+
callbacks=callbacks,
|
| 625 |
+
optimizers=optimizers,
|
| 626 |
+
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
|
| 627 |
+
)
|
| 628 |
+
|
| 629 |
+
# Add tags for models that have been loaded with the correct transformers version
|
| 630 |
+
if hasattr(self.model, "add_model_tags"):
|
| 631 |
+
self.model.add_model_tags(self._tag_names)
|
| 632 |
+
|
| 633 |
+
if not hasattr(self, "accelerator"):
|
| 634 |
+
raise AttributeError(
|
| 635 |
+
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
|
| 636 |
+
)
|
| 637 |
+
|
| 638 |
+
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
    """Wrap `model` in a DeepSpeed engine configured for inference-only use.

    Adapted from accelerate:
    https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473

    Args:
        model: The model (typically a reference model) to wrap; may be `None`,
            in which case only the config-derived engine is initialized.

    Returns:
        The DeepSpeed-initialized model, switched to eval mode.
    """
    deepspeed_plugin = self.accelerator.state.deepspeed_plugin
    # Deep-copy so the tweaks below never leak into the trainer's live DeepSpeed config.
    config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)

    if model is not None:
        if hasattr(model, "config"):
            # Prefer `hidden_sizes` (multi-stage configs); fall back to scalar `hidden_size`.
            hidden_size = (
                max(model.config.hidden_sizes)
                if getattr(model.config, "hidden_sizes", None)
                else getattr(model.config, "hidden_size", None)
            )
            if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
                # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like:
                # `Invalidate trace cache @ step 0: expected module 1, but got module 0`
                # This is expected and is not an error, see:
                # https://github.com/microsoft/DeepSpeed/discussions/4081
                config_kwargs.update(
                    {
                        "zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
                        "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
                        "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
                    }
                )

    # If ZeRO-3 is used, we shard both the active and reference model.
    # Otherwise, we assume the reference model fits in memory and is initialized on each
    # device with ZeRO disabled (stage 0).
    if config_kwargs["zero_optimization"]["stage"] != 3:
        config_kwargs["zero_optimization"]["stage"] = 0
    model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
    model.eval()
    return model
def build_tokenized_answer(self, prompt, answer):
    """Tokenize `prompt + answer` and split the result into prompt and answer parts.

    Tokenizers such as Llama's do NOT necessarily satisfy
    `enc(a + b) = enc(a) + enc(b)`; they only guarantee
    `enc(a + b) = enc(a) + enc(a + b)[len(enc(a)):]` up to a possible merge of the
    last prompt token with the first answer token. This function detects such a
    merge and adjusts the prompt/answer boundary accordingly.

    Reference:
        https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257

    Returns:
        A dict with keys `prompt_input_ids`, `prompt_attention_mask`,
        `input_ids` (answer), and `attention_mask` (answer).
    """

    full_tokenized = self.processing_class(prompt + answer, add_special_tokens=False)
    prompt_input_ids = self.processing_class(prompt, add_special_tokens=False)["input_ids"]

    # Provisional answer tokens: everything after the stand-alone prompt encoding.
    answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :]
    answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :]

    # Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
    full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids])

    # Prepare input tokens for token by token comparison
    full_input_ids = np.array(full_tokenized["input_ids"])

    # If the lengths differ, the tokenizer merged MORE than one token at the
    # boundary, which this function cannot repair.
    if len(full_input_ids) != len(full_concat_input_ids):
        raise ValueError("Prompt input ids and answer input ids should have the same length.")

    # On some tokenizers, like Llama-2 tokenizer, there are occasions where tokens
    # can be merged together when tokenizing prompt+answer. This could result
    # on the last token from the prompt being different when tokenized on its own
    # vs when done as prompt+answer.
    response_token_ids_start_idx = len(prompt_input_ids)

    # If tokenized prompt is different than both prompt+answer, then it means the
    # last token has changed due to merging; move the boundary one token left so
    # the merged token is attributed to the answer.
    if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]:
        response_token_ids_start_idx -= 1

    prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx]
    prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx]

    if len(prompt_input_ids) != len(prompt_attention_mask):
        raise ValueError("Prompt input ids and attention mask should have the same length.")

    answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:]
    answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:]

    return dict(
        prompt_input_ids=prompt_input_ids,
        prompt_attention_mask=prompt_attention_mask,
        input_ids=answer_input_ids,
        attention_mask=answer_attention_mask,
    )
def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]] = None) -> dict:
    """Tokenize a single row from a ORPO specific dataset.

    At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
    in case the prompt + chosen or prompt + rejected responses is/are too long. First
    we truncate the prompt; if we're still too long, we truncate the chosen/rejected.

    We also create the labels for the chosen/rejected responses, which are of length equal to
    the sum of the length of the prompt and the chosen/rejected response, with
    label_pad_token_id for the prompt tokens.

    Args:
        feature: A mapping with string fields "prompt", "chosen" and "rejected".
        model: Optional model; for encoder-decoder models that expose
            `prepare_decoder_input_ids_from_labels`, decoder input ids are added.

    Returns:
        A dict of token-id / attention-mask / label lists keyed by
        `prompt_*`, `chosen_*` and `rejected_*`.
    """
    batch = {}
    prompt = feature["prompt"]
    chosen = feature["chosen"]
    rejected = feature["rejected"]

    if not self.is_encoder_decoder:
        # Check issues below for more details
        #  1. https://github.com/huggingface/trl/issues/907
        #  2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
        #  3. https://github.com/LianjiaTech/BELLE/issues/337

        if not isinstance(prompt, str):
            raise ValueError(f"prompt should be an str but got {type(prompt)}")
        prompt_tokens = self.processing_class(prompt, add_special_tokens=False)
        prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()}

        if not isinstance(chosen, str):
            raise ValueError(f"chosen should be an str but got {type(chosen)}")
        chosen_tokens = self.build_tokenized_answer(prompt, chosen)

        if not isinstance(rejected, str):
            raise ValueError(f"rejected should be an str but got {type(rejected)}")
        rejected_tokens = self.build_tokenized_answer(prompt, rejected)

        # Last prompt token might get merged by tokenizer and
        # it should not be included for generation if that happens
        prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"])

        chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"])
        rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"])
        # Use the shorter of the two prompt encodings so both share an identical prefix.
        prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids)

        for k, v in prompt_tokens.items():
            prompt_tokens[k] = v[:prompt_len_input_ids]

        # Make sure the two prompts have at most one differing token,
        # and that their lengths differ by at most 1 (tokenizer boundary merge).
        num_diff_tokens = sum(
            [a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])]
        )
        num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids)
        if num_diff_tokens > 1 or num_diff_len > 1:
            raise ValueError(
                "Chosen and rejected prompt_input_ids might only differ on the "
                "last token due to tokenizer merge ops."
            )

        # add BOS token to head of prompt. Avoid adding if it's already there
        prompt_tokens, chosen_tokens, rejected_tokens = add_bos_token_if_needed(
            self.processing_class.bos_token_id,
            prompt_len_input_ids,
            prompt_tokens,
            chosen_prompt_len_input_ids,
            chosen_tokens,
            rejected_prompt_len_input_ids,
            rejected_tokens,
        )

        # add EOS token to end of answer. Avoid adding if it's already there
        chosen_tokens, rejected_tokens = add_eos_token_if_needed(
            self.processing_class.eos_token_id, chosen_tokens, rejected_tokens
        )

        longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))

        # if combined sequence is too long, truncate the prompt
        for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
            if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
                if self.truncation_mode == "keep_start":
                    for k in ["prompt_input_ids", "prompt_attention_mask"]:
                        answer_tokens[k] = answer_tokens[k][: self.max_prompt_length]
                elif self.truncation_mode == "keep_end":
                    for k in ["prompt_input_ids", "prompt_attention_mask"]:
                        answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :]
                else:
                    raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")

        # if that's still too long, truncate the response
        for answer_tokens in [chosen_tokens, rejected_tokens]:
            if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
                for k in ["input_ids", "attention_mask"]:
                    answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length]

        # Create labels: full prompt+response ids, with the prompt span masked out
        # so the loss is only computed on response tokens.
        chosen_sequence_tokens = {
            k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"]
        }
        rejected_sequence_tokens = {
            k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"]
        }
        chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
        chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [
            self.label_pad_token_id
        ] * len(chosen_tokens["prompt_input_ids"])
        rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
        rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [
            self.label_pad_token_id
        ] * len(rejected_tokens["prompt_input_ids"])

        # Flatten everything into the output batch; `token_type_ids` is dropped
        # because downstream collation does not use it.
        for k, toks in {
            "chosen_": chosen_sequence_tokens,
            "rejected_": rejected_sequence_tokens,
            "": prompt_tokens,
        }.items():
            for type_key, tokens in toks.items():
                if type_key == "token_type_ids":
                    continue
                batch[f"{k}{type_key}"] = tokens

    else:
        # Encoder-decoder path: prompt and responses are tokenized independently,
        # with special tokens, and truncated to their own max lengths.
        chosen_tokens = self.processing_class(
            chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
        )
        rejected_tokens = self.processing_class(
            rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
        )
        prompt_tokens = self.processing_class(
            prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True
        )

        batch["chosen_labels"] = chosen_tokens["input_ids"]
        batch["rejected_labels"] = rejected_tokens["input_ids"]
        batch["prompt_input_ids"] = prompt_tokens["input_ids"]
        batch["prompt_attention_mask"] = prompt_tokens["attention_mask"]

        if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
            batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
                labels=torch.tensor(batch["rejected_labels"])
            )
            batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
                labels=torch.tensor(batch["chosen_labels"])
            )

    if is_torch_xla_available():
        # Pad the sequences to global max_length to avoid TorchXLA recompilation
        # (XLA recompiles for every new input shape).
        for k in batch:
            if "labels" in k or self.is_encoder_decoder:
                pad_value = self.label_pad_token_id
            elif k.endswith("_input_ids"):
                pad_value = self.padding_value
            elif k.endswith("_attention_mask"):
                pad_value = 0
            batch[k] = batch[k] + [pad_value] * (self.max_length - len(batch[k]))
    return batch
@staticmethod
def concatenated_inputs(
    batch: dict[str, Union[list, torch.LongTensor]],
    is_encoder_decoder: bool = False,
    label_pad_token_id: int = -100,
    padding_value: int = 0,
    device: Optional[torch.device] = None,
) -> dict[str, torch.LongTensor]:
    """Concatenate the chosen and rejected inputs into a single tensor.

    Args:
        batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).
        is_encoder_decoder: Whether the model is an encoder-decoder model.
        label_pad_token_id: The label pad token id.
        padding_value: The padding value to use for the concatenated inputs_ids.
        device: The device for the concatenated inputs.

    Returns:
        A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
    """
    concatenated_batch = {}

    # Pad both halves to a common sequence length before stacking along the batch dim.
    if is_encoder_decoder:
        max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1])
    else:
        max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])

    # First pass: seed `concatenated_*` entries from the chosen tensors.
    for k in batch:
        if k.startswith("chosen") and isinstance(batch[k], torch.Tensor):
            # Pad value depends on the tensor's role (labels vs ids vs mask).
            if "labels" in k or is_encoder_decoder:
                pad_value = label_pad_token_id
            elif k.endswith("_input_ids"):
                pad_value = padding_value
            elif k.endswith("_attention_mask"):
                pad_value = 0
            concatenated_key = k.replace("chosen", "concatenated")
            concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
    # Second pass: append the rejected tensors below the chosen ones (dim=0), so the
    # first half of each concatenated tensor is "chosen" and the second is "rejected".
    for k in batch:
        if k.startswith("rejected") and isinstance(batch[k], torch.Tensor):
            if "labels" in k or is_encoder_decoder:
                pad_value = label_pad_token_id
            elif k.endswith("_input_ids"):
                pad_value = padding_value
            elif k.endswith("_attention_mask"):
                pad_value = 0
            concatenated_key = k.replace("rejected", "concatenated")
            concatenated_batch[concatenated_key] = torch.cat(
                (
                    concatenated_batch[concatenated_key],
                    pad_to_length(batch[k], max_length, pad_value=pad_value),
                ),
                dim=0,
            ).to(device=device)

    if is_encoder_decoder:
        # Encoder inputs are the prompt, duplicated once for chosen and once for rejected.
        concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device)
        concatenated_batch["concatenated_attention_mask"] = (
            batch["prompt_attention_mask"].repeat(2, 1).to(device=device)
        )

    return concatenated_batch
def odds_ratio_loss(
    self,
    policy_chosen_logps: torch.FloatTensor,
    policy_rejected_logps: torch.FloatTensor,
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    """Compute ORPO's odds-ratio (OR) loss from policy log probabilities.

    Args:
        policy_chosen_logps: Policy log probabilities of the chosen responses, shape (batch_size,).
        policy_rejected_logps: Policy log probabilities of the rejected responses, shape (batch_size,).

    Returns:
        A 5-tuple of:
        - per-example ORPO losses (to be subtracted from the NLL term by the caller),
        - detached chosen rewards (beta-scaled chosen log-probs),
        - detached rejected rewards (beta-scaled rejected log-probs),
        - mean log-sigmoid of the log-odds ratio (for logging),
        - mean log-odds ratio of chosen over rejected (for logging).
    """
    # Derived from Eqs. (4) and (7) from https://huggingface.co/papers/2403.07691
    # using log identities and exp(log(P(y|x))) = P(y|x):
    # log-odds(y|x) = log P - log(1 - P) = logp - log1p(-exp(logp)).
    log_odds = (policy_chosen_logps - policy_rejected_logps) - (
        torch.log1p(-torch.exp(policy_chosen_logps)) - torch.log1p(-torch.exp(policy_rejected_logps))
    )
    log_sigmoid_ratio = F.logsigmoid(log_odds)
    losses = self.beta * log_sigmoid_ratio

    # Rewards are detached, beta-scaled log-probs moved to the trainer's device.
    target_device = self.accelerator.device
    chosen_rewards = self.beta * (policy_chosen_logps.to(target_device)).detach()
    rejected_rewards = self.beta * (policy_rejected_logps.to(target_device)).detach()

    return losses, chosen_rewards, rejected_rewards, log_sigmoid_ratio.mean(), log_odds.mean()
@staticmethod
def get_batch_logps(
    logits: torch.FloatTensor,
    labels: torch.LongTensor,
    average_log_prob: bool = False,
    label_pad_token_id: int = -100,
    is_encoder_decoder: bool = False,
) -> torch.FloatTensor:
    """Compute per-sequence log probabilities of `labels` under `logits`.

    Args:
        logits: Unnormalized model logits, shape (batch_size, sequence_length, vocab_size).
        labels: Target token ids, shape (batch_size, sequence_length); positions equal to
            `label_pad_token_id` are excluded from the result.
        average_log_prob: If True, return the mean log-prob per non-masked token;
            otherwise return the sum over non-masked tokens.
        label_pad_token_id: The label pad token id.
        is_encoder_decoder: Whether the model is an encoder-decoder model.

    Returns:
        Tensor of shape (batch_size,) with the summed (or averaged) label log probabilities.
    """
    if logits.shape[:-1] != labels.shape:
        raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")

    if not is_encoder_decoder:
        # Decoder-only models: the logit at position t predicts the token at t+1,
        # so drop the first label and the last logit to align them.
        labels = labels[:, 1:].clone()
        logits = logits[:, :-1, :]
    loss_mask = labels != label_pad_token_id

    # Swap pad positions for a dummy index 0 so the gather inside
    # selective_log_softmax is valid; the mask zeroes them out afterwards.
    labels = torch.where(labels == label_pad_token_id, 0, labels)

    per_token_logps = selective_log_softmax(logits, labels)

    token_logp_sum = (per_token_logps * loss_mask).sum(-1)
    if average_log_prob:
        return token_logp_sum / loss_mask.sum(-1)
    return token_logp_sum
def concatenated_forward(
    self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.

    We do this to avoid doing two forward passes, because it's faster for FSDP.

    Returns a tuple of (chosen_logps, rejected_logps, chosen_logits, rejected_logits,
    chosen_nll_loss) plus, when the auxiliary router loss is enabled, the model's
    `aux_loss` appended as a sixth element.
    """
    # Stack chosen (first half) and rejected (second half) along the batch dim.
    concatenated_batch = self.concatenated_inputs(
        batch,
        is_encoder_decoder=self.is_encoder_decoder,
        label_pad_token_id=self.label_pad_token_id,
        padding_value=self.padding_value,
        device=self.accelerator.device,
    )
    # Number of chosen examples = boundary between the two halves.
    len_chosen = batch["chosen_labels"].shape[0]

    model_kwargs = (
        {
            # NOTE(review): `_shift_right` is presumably provided by the enclosing
            # trainer/model for encoder-decoder decoding — not visible here; confirm.
            "decoder_input_ids": self._shift_right(concatenated_batch["concatenated_labels"]),
        }
        if self.is_encoder_decoder
        else {}
    )

    if self.aux_loss_enabled:
        model_kwargs["output_router_logits"] = True

    outputs = model(
        concatenated_batch["concatenated_input_ids"],
        attention_mask=concatenated_batch["concatenated_attention_mask"],
        use_cache=False,
        **model_kwargs,
    )
    all_logits = outputs.logits

    def cross_entropy_loss(logits, labels):
        # Standard LM cross-entropy; pad labels (label_pad_token_id == CrossEntropyLoss's
        # default ignore_index of -100) are skipped by the loss.
        if not self.is_encoder_decoder:
            # Shift so that tokens < n predict n
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
        # Flatten the tokens
        loss_fct = nn.CrossEntropyLoss()
        logits = logits.view(-1, logits.shape[-1])
        labels = labels.view(-1)
        # Enable model parallelism
        labels = labels.to(logits.device)
        loss = loss_fct(logits, labels)
        return loss

    if self.is_encoder_decoder:
        labels = concatenated_batch["concatenated_labels"].clone()
    else:
        # Decoder-only: labels are the input ids with padding positions masked out.
        labels = concatenated_batch["concatenated_input_ids"].clone()
        attention_mask = concatenated_batch["concatenated_attention_mask"]
        labels = torch.where(attention_mask == 1, labels, self.label_pad_token_id)
    # orpo chosen nll loss is computed over the full prompt and response
    chosen_nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen])

    # Average (not summed) log-probs per sequence, as required by the odds-ratio loss.
    all_logps = self.get_batch_logps(
        all_logits,
        concatenated_batch["concatenated_labels"],
        average_log_prob=True,
        is_encoder_decoder=self.is_encoder_decoder,
        label_pad_token_id=self.label_pad_token_id,
    )

    chosen_logps = all_logps[:len_chosen]
    rejected_logps = all_logps[len_chosen:]

    if not self.is_encoder_decoder:
        # Drop the final-position logits to mirror the shift done in get_batch_logps.
        chosen_logits = all_logits[:len_chosen, :-1, :]
        rejected_logits = all_logits[len_chosen:, :-1, :]
    else:
        chosen_logits = all_logits[:len_chosen]
        rejected_logits = all_logits[len_chosen:]

    if self.aux_loss_enabled:
        return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_nll_loss, outputs.aux_loss)

    return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_nll_loss)
def get_batch_loss_metrics(
    self,
    model,
    batch: dict[str, Union[list, torch.LongTensor]],
    train_eval: Literal["train", "eval"] = "train",
):
    """Compute the ORPO loss and other metrics for the given batch of inputs for train or test.

    Returns a ``(loss, metrics)`` tuple: the scalar ORPO loss (chosen NLL minus
    the mean odds-ratio term, plus the optional router aux loss) and a dict of
    gathered, averaged metrics whose keys carry an ``eval_`` prefix in eval mode.
    """
    metrics = {}

    # One concatenated forward pass over chosen + rejected; the first five
    # outputs are fixed, a sixth (aux_loss) is present only when enabled.
    forward_output = self.concatenated_forward(model, batch)
    (
        policy_chosen_logps,
        policy_rejected_logps,
        policy_chosen_logits,
        policy_rejected_logits,
        policy_nll_loss,
    ) = forward_output[:5]
    if self.aux_loss_enabled:
        aux_loss = forward_output[5]

    losses, chosen_rewards, rejected_rewards, log_odds_ratio, log_odds_chosen = self.odds_ratio_loss(
        policy_chosen_logps, policy_rejected_logps
    )
    # full ORPO loss
    loss = policy_nll_loss - losses.mean()

    reward_accuracies = (chosen_rewards > rejected_rewards).float()

    prefix = "eval_" if train_eval == "eval" else ""
    # Gather across processes before averaging so logged values are global.
    metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean()
    metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean()
    metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean()
    metrics[f"{prefix}rewards/margins"] = self.accelerator.gather_for_metrics(
        chosen_rewards - rejected_rewards
    ).mean()
    metrics[f"{prefix}logps/rejected"] = self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean()
    metrics[f"{prefix}logps/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean()
    metrics[f"{prefix}logits/rejected"] = (
        self.accelerator.gather_for_metrics(policy_rejected_logits).detach().mean()
    )
    metrics[f"{prefix}logits/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logits).detach().mean()
    metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean()
    metrics[f"{prefix}log_odds_ratio"] = self.accelerator.gather_for_metrics(log_odds_ratio).mean()
    metrics[f"{prefix}log_odds_chosen"] = self.accelerator.gather_for_metrics(log_odds_chosen).mean()
    if is_torch_xla_available():
        xm.mark_step()  # needed because .item() calls
        for k, v in metrics.items():
            metrics[k] = v.item()
    if self.aux_loss_enabled:
        loss += self.aux_loss_coef * aux_loss

    return loss, metrics
|
| 1139 |
+
|
| 1140 |
+
def compute_loss(
    self,
    model: Union[PreTrainedModel, nn.Module],
    inputs: dict[str, Union[torch.Tensor, Any]],
    return_outputs=False,
    num_items_in_batch=None,
) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
    """Compute the ORPO training loss for one batch and record its metrics.

    Returns the loss tensor, or ``(loss, metrics)`` when ``return_outputs`` is set.
    """
    # peft models cast to bf16 need an explicit autocast context for the forward.
    if self._peft_has_been_casted_to_bf16:
        autocast_ctx = amp.autocast("cuda")
    else:
        autocast_ctx = nullcontext()

    with autocast_ctx:
        loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train")

    # The base `Trainer` accumulates losses on args.device; move ours there.
    loss = loss.to(self.args.device)

    # force log the metrics
    self.store_metrics(metrics, train_eval="train")

    return (loss, metrics) if return_outputs else loss
|
| 1161 |
+
|
| 1162 |
+
def generate_from_model(self, model, batch: dict[str, torch.LongTensor]) -> str:
    """Sample completions from `model` for the prompts in `batch` and return them decoded."""

    # With `generate_during_eval` + peft + bf16, some hidden states are silently
    # cast to full precision, so generation must run inside a cuda autocast context.
    if self._peft_has_been_casted_to_bf16:
        ctx = amp.autocast("cuda")
    else:
        ctx = nullcontext()

    with ctx:
        raw_output = model.generate(
            input_ids=batch["prompt_input_ids"],
            attention_mask=batch["prompt_attention_mask"],
            max_length=self.max_length,
            do_sample=True,
            pad_token_id=self.processing_class.pad_token_id,
        )

    # Pad all sequences to a uniform length before decoding.
    padded = pad_to_length(raw_output, self.max_length, self.processing_class.pad_token_id)
    return self.processing_class.batch_decode(padded, skip_special_tokens=True)
|
| 1182 |
+
|
| 1183 |
+
def prediction_step(
    self,
    model: Union[PreTrainedModel, nn.Module],
    inputs: dict[str, Union[torch.Tensor, Any]],
    prediction_loss_only: bool,
    ignore_keys: Optional[list[str]] = None,
):
    """Run one evaluation step and return ``(loss, logits, labels)``.

    Stores eval metrics as a side effect; when ``prediction_loss_only`` is set,
    logits and labels are returned as ``None``.
    """
    if not self.use_dpo_data_collator:
        warnings.warn(
            "prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than "
            "DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator"
        )
    if ignore_keys is None:
        if hasattr(model, "config"):
            ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
        else:
            ignore_keys = []

    # Same bf16/peft autocast handling as in compute_loss.
    prediction_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

    with torch.no_grad(), prediction_context_manager:
        loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval")

    # force log the metrics
    self.store_metrics(metrics, train_eval="eval")

    if prediction_loss_only:
        return (loss.detach(), None, None)

    # logits for the chosen and rejected samples from model
    logits_dict = {
        "eval_logits/chosen": metrics["eval_logits/chosen"],
        "eval_logits/rejected": metrics["eval_logits/rejected"],
    }
    # Keep only keys not excluded via ignore_keys, then stack into one tensor.
    logits = tuple(v.unsqueeze(dim=0) for k, v in logits_dict.items() if k not in ignore_keys)
    logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device)
    # Dummy labels: the base Trainer API expects a labels tensor here.
    labels = torch.zeros(logits.shape[0], device=self.accelerator.device)

    return (loss.detach(), logits, labels)
|
| 1222 |
+
|
| 1223 |
+
def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
    """Accumulate per-batch metric values into the buffer later drained by `log`."""
    bucket = self._stored_metrics[train_eval]
    for name in metrics:
        bucket[name].append(metrics[name])
|
| 1226 |
+
|
| 1227 |
+
def evaluation_loop(
    self,
    dataloader: DataLoader,
    description: str,
    prediction_loss_only: Optional[bool] = None,
    ignore_keys: Optional[list[str]] = None,
    metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
    """
    Overriding built-in evaluation loop to store metrics for each batch.
    Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

    Works both with or without labels.
    """

    # Sample and save to game log if requested (for one batch to save time)
    if self.generate_during_eval:
        # Generate random indices within the range of the total number of samples
        num_samples = len(dataloader.dataset)
        random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)

        # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
        random_batch_dataset = dataloader.dataset.select(random_indices)
        random_batch = self.data_collator(random_batch_dataset)
        random_batch = self._prepare_inputs(random_batch)

        policy_output_decoded = self.generate_from_model(self.model, random_batch)

        # Each row pairs a prompt with its generated continuation (prompt prefix stripped).
        table = pd.DataFrame(
            columns=["Prompt", "Policy"],
            data=[
                [prompt, pol[len(prompt) :]] for prompt, pol in zip(random_batch["prompt"], policy_output_decoded)
            ],
        )
        # Mirror the sample table to whichever experiment trackers are active.
        if "wandb" in self.args.report_to:
            wandb.log({"game_log": wandb.Table(data=table)})

        if "comet_ml" in self.args.report_to:
            log_table_to_comet_experiment(
                name="game_log.csv",
                table=table,
            )

    # Base evaluation
    initial_output = super().evaluation_loop(
        dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
    )

    return initial_output
|
| 1276 |
+
|
| 1277 |
+
def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
    """
    Log `logs` on the various objects watching training, including stored metrics.

    Args:
        logs (`dict[str, float]`):
            The values to log.
        start_time (`float` or `None`, *optional*, defaults to `None`):
            Start time of the training.
    """
    # logs either has 'loss' or 'eval_loss'
    train_eval = "train" if "loss" in logs else "eval"
    # Add averaged stored metrics to logs
    for key, metrics in self._stored_metrics[train_eval].items():
        logs[key] = torch.tensor(metrics).mean().item()
    # Drop the buffer so the next logging window starts fresh.
    del self._stored_metrics[train_eval]

    # transformers >= 4.47 added the `start_time` parameter to Trainer.log.
    if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
        return super().log(logs, start_time)
    else:  # transformers<=4.46
        return super().log(logs)
|
| 1298 |
+
|
| 1299 |
+
def _shift_right(self, input_ids):
    """Shift `input_ids` one position to the right, inserting the decoder start token.

    Used for encoder-decoder models to build decoder inputs from labels;
    any -100 ignore-index entries are replaced by the pad token.

    Raises:
        ValueError: if `decoder_start_token_id` or `pad_token_id` is undefined.
    """
    if self.decoder_start_token_id is None:
        raise ValueError(
            "model.config.decoder_start_token_id has to be defined. It is usually set to the pad_token_id."
        )

    if is_torch_fx_proxy(input_ids):
        # FX proxies do not support item assignment, so build the shifted
        # tensor by concatenation instead of in-place writes.
        start_col = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id)
        shifted = torch.cat([start_col, input_ids[..., :-1]], dim=-1)
    else:
        shifted = input_ids.new_zeros(input_ids.shape)
        shifted[..., 1:] = input_ids[..., :-1].clone()
        shifted[..., 0] = self.decoder_start_token_id

    if self.pad_token_id is None:
        raise ValueError("model.config.pad_token_id has to be defined.")
    # Labels may carry -100 (loss ignore index); make them real pad tokens.
    shifted.masked_fill_(shifted == -100, self.pad_token_id)

    return shifted
|
| 1321 |
+
|
| 1322 |
+
def create_model_card(
    self,
    model_name: Optional[str] = None,
    dataset_name: Optional[str] = None,
    tags: Union[str, list[str], None] = None,
):
    """
    Creates a draft of a model card using the information available to the `Trainer`.

    Args:
        model_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the model.
        dataset_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the dataset used for training.
        tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
            Tags to be associated with the model card.
    """
    # Only the main process writes the card.
    if not self.is_world_process_zero():
        return

    # A `_name_or_path` that is not a local directory is a hub model id.
    if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
        base_model = self.model.config._name_or_path
    else:
        base_model = None

    tags = tags or []
    if isinstance(tags, str):
        tags = [tags]

    if hasattr(self.model.config, "unsloth_version"):
        tags.append("unsloth")

    # BibTeX entry for the ORPO paper, embedded in the generated card.
    citation = textwrap.dedent("""\
    @article{hong2024orpo,
        title        = {{ORPO: Monolithic Preference Optimization without Reference Model}},
        author       = {Jiwoo Hong and Noah Lee and James Thorne},
        year         = 2024,
        eprint       = {arXiv:2403.07691}
    }""")

    model_card = generate_model_card(
        base_model=base_model,
        model_name=model_name,
        hub_model_id=self.hub_model_id,
        dataset_name=dataset_name,
        tags=tags,
        wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
        comet_url=get_comet_experiment_url(),
        trainer_name="ORPO",
        trainer_citation=citation,
        paper_title="ORPO: Monolithic Preference Optimization without Reference Model",
        paper_id="2403.07691",
    )

    model_card.save(os.path.join(self.args.output_dir, "README.md"))
|
| 1377 |
+
class UnslothORPOTrainer(_UnslothORPOTrainer):
    """
    Initialize ORPOTrainer.

    Args:
        model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForSequenceClassification`.
        args (`ORPOConfig`):
            The ORPO config arguments to use for training.
        data_collator (`transformers.DataCollator`):
            The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
            which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
        train_dataset (`datasets.Dataset`):
            The dataset to use for training.
        eval_dataset (`datasets.Dataset`):
            The dataset to use for evaluation.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
        model_init (`Callable[[], transformers.PreTrainedModel]`):
            The model initializer to use for training. If None is specified, the default model initializer will be used.
        callbacks (`list[transformers.TrainerCallback]`):
            The callbacks to use for training.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
            The function to use to preprocess the logits before computing the metrics.
        peft_config (`dict`, defaults to `None`):
            The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function to use to compute the metrics. Must take a `EvalPrediction` and return
            a dictionary string to metric values.
    """
    # NOTE: this __init__ is machine-generated by unsloth; it normalizes
    # precision flags, eval settings and the data collator before delegating
    # to the base trainer. The statement order is significant.
    def __init__(
        self,
        model = None,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        model_init = None,
        callbacks = None,
        preprocess_logits_for_metrics = None,
        peft_config = None,
        compute_metrics = None,
        **kwargs
    ):
        if args is None: args = UnslothORPOConfig()
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        force_float32 = False
        # Env override: disable mixed precision entirely.
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        # Infer the model's own dtype to validate the requested precision flags.
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            # Neither flag set: pick the one matching the model dtype.
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        # An eval dataset without an eval strategy would never be evaluated.
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            # Avoid eval batches larger than train batches (OOM risk).
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        # Keep full-eval dtype consistent with the training dtype.
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        _output_logits = False
        # Metrics callbacks need raw logits returned from the model.
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        # Propagate the model's max_seq_length into args when args lacks one.
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        # Right padding is required for training; patch tokenizer/processor in place.
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
        from unsloth_zoo.vision_utils import UnslothVisionDataCollator
        # Swap the collator so its type matches whether the dataset already has labels.
        if not isinstance(data_collator, UnslothVisionDataCollator):
            if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
                data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
            elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
                data_collator = DataCollatorForSeq2Seq(__tokenizer)
        else:
            # Vision collator prepares everything itself; disable dataset preprocessing.
            if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
            if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
            if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Processor wrappers without `.pad` expose the real tokenizer underneath.
            if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
                if isinstance(data_collator, DataCollatorForSeq2Seq):
                    data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
                else:
                    data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
        other_metrics = []

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('orpo_trainer', other_metrics)

        super().__init__(
            model = model,
            args = args,
            data_collator = data_collator,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            model_init = model_init,
            callbacks = callbacks,
            preprocess_logits_for_metrics = preprocess_logits_for_metrics,
            peft_config = peft_config,
            compute_metrics = compute_metrics,**kwargs)
        # Remove the base trainer's NEFTune hook and re-attach the noise alpha
        # directly on the embeddings (unsloth handles NEFTune itself).
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass

    pass
|
unsloth_compiled_cache/UnslothOnlineDPOTrainer.py
ADDED
|
@@ -0,0 +1,1263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.online_dpo_trainer import (Any, BaseImageProcessor, BasePairwiseJudge, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalPrediction, F, FeatureExtractionMixin, GenerationConfig, IterableDataset, OnlineDPOConfig, OnlineDPOTrainer, OptimizerNames, Optional, PREFIX_CHECKPOINT_DIR, PeftModel, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, SIMPLE_CHAT_TEMPLATE, Trainer, TrainerCallback, Union, apply_chat_template, create_reference_model, datasets, disable_dropout_in_model, empty_cache, generate_model_card, get_comet_experiment_url, get_reward, is_conversational, is_peft_available, is_wandb_available, jinja2, logging, maybe_apply_chat_template, nn, np, os, prepare_deepspeed, seed_worker, textwrap, torch, transformers, truncate_right, unwrap_model_for_generation, version, warnings, wraps, F, is_conversational, os, torch)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
# Shared options for every `torch.compile` call in this generated module.
# Tuned for fast, predictable compilation rather than maximum autotuned speed.
torch_compile_options = {
    "epilogue_fusion" : True,    # allow inductor to fuse epilogues into kernels
    "max_autotune" : False,      # skip expensive autotuning to keep compiles fast
    "shape_padding" : True,      # pad shapes for better kernel alignment
    "trace.enabled" : False,     # no compile-trace artifacts
    "triton.cudagraphs" : False, # cudagraphs disabled — presumably for compatibility; verify
}
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
def selective_log_softmax(logits, index):
    """Return per-token log-probabilities of the tokens in ``index``.

    Computes ``log_softmax(logits)[..., index]`` without materialising the
    full log-softmax tensor: gather the selected logits first, then subtract
    the logsumexp normaliser (log_softmax(x_i) = x_i - logsumexp(x)).
    """
    # Work in float32 for numerical stability regardless of input dtype.
    logits = logits.to(torch.float32)
    chosen = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
    normaliser = torch.logsumexp(logits, dim = -1)
    return chosen - normaliser
def vLLMSamplingParams(**kwargs):
    """Build a vLLM ``SamplingParams``, remembering the kwargs that built it.

    The raw keyword arguments are stashed on ``_set_kwargs`` so later code can
    merge them into another ``SamplingParams`` construction.
    """
    from vllm import SamplingParams  # local import: vLLM is an optional dependency
    params = SamplingParams(**kwargs)
    params._set_kwargs = kwargs
    return params
|
@dataclass
class UnslothOnlineDPOConfig(OnlineDPOConfig):
    """
    Configuration class for the [`OnlineDPOTrainer`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    NOTE: the defaults below document upstream TRL; this Unsloth subclass
    overrides several of them in ``__init__`` (e.g. ``learning_rate=5e-05``,
    ``max_length=512``, ``seed=3407``, ``optim='adamw_8bit'``).

    Parameters:
        learning_rate (`float`, *optional*, defaults to `5e-7`):
            Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
            [`~transformers.TrainingArguments`].
        reward_model_path (`str` or `None`, *optional*, defaults to `None`):
            Path to the reward model. Either `judge` or `reward_model_path` must be set, but not both.
        judge (`str` or `None`, *optional*, defaults to `None`):
            Name of the judge to use. Either `judge` or `reward_model_path` must be set, but not both.
        max_new_tokens (`int`, *optional*, defaults to `64`):
            Maximum number of tokens to generate per completion.
        max_length (`int`, *optional*, defaults to `256`):
            Maximum total length of the sequence (prompt + completion) used to compute log probabilities. If the
            sequence exceeds this limit, the leftmost tokens will be truncated to preserve as much of the completion as
            possible.
        temperature (`float`, *optional*, defaults to `0.9`):
            Temperature for sampling. The higher the temperature, the more random the completions.
        missing_eos_penalty (`float` or `None`, *optional*, defaults to `None`):
            Penalty applied to the score when the model fails to generate an EOS token. This is useful to encourage
            to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be a positive
            value.
        beta (`float` or `list[float]`, *optional*, defaults to `0.1`):
            Parameter controlling the deviation from the reference model. Higher β means less deviation from the
            reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in
            the [paper](https://huggingface.co/papers/2310.12036). If a list of floats is provided then the β is
            selected for each new epoch and the last β is used for the rest of the epochs.
        loss_type (`str`, *optional*, defaults to `"sigmoid"`):
            Type of loss to use. Possible values are:

                - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper.
                - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper.

        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model and reference model.
        use_vllm (`bool`, *optional*, defaults to `False`):
            Whether to use vLLM for generating completions. Requires vLLM to be installed (`pip install vllm`).
        ds3_gather_for_generation (`bool`, *optional*, defaults to `True`):
            This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation,
            improving generation speed. However, disabling this option allows training models that exceed the VRAM
            capacity of a single GPU, albeit at the cost of slower generation.
    """
    # Unsloth-only extras. This class defines its own `__init__`, which
    # @dataclass keeps (dataclasses never overwrite members already defined
    # on the class body), so these fields only contribute metadata/defaults.
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    def __init__(
        self,
        output_dir = None, overwrite_output_dir = None, do_train = False, do_eval = False,
        do_predict = False, eval_strategy = 'no', prediction_loss_only = False,
        per_device_train_batch_size = 4, per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None, per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2, eval_accumulation_steps = 2, eval_delay = 0,
        torch_empty_cache_steps = 250, learning_rate = 5e-05, weight_decay = 0.01,
        adam_beta1 = 0.9, adam_beta2 = 0.999, adam_epsilon = 1e-08, max_grad_norm = 1.0,
        num_train_epochs = 3.0, max_steps = -1, lr_scheduler_type = 'linear',
        warmup_ratio = 0.1, warmup_steps = 0, log_level = 'passive',
        log_level_replica = 'warning', log_on_each_node = True, logging_dir = None,
        logging_strategy = 'steps', logging_first_step = False, logging_steps = 1,
        logging_nan_inf_filter = False, save_strategy = 'steps', save_steps = 500,
        save_total_limit = None, save_safetensors = True, save_on_each_node = False,
        save_only_model = False, restore_callback_states_from_checkpoint = False,
        no_cuda = False, use_cpu = False, use_mps_device = False, seed = 3407,
        data_seed = 3407, jit_mode_eval = False, use_ipex = False, bf16 = False,
        fp16 = False, fp16_opt_level = 'O1', half_precision_backend = 'auto',
        bf16_full_eval = False, fp16_full_eval = False, tf32 = None, local_rank = -1,
        ddp_backend = None, tpu_num_cores = None, tpu_metrics_debug = False, debug = '',
        dataloader_drop_last = False, eval_steps = None, dataloader_num_workers = 0,
        dataloader_prefetch_factor = None, past_index = -1, run_name = None,
        disable_tqdm = None, remove_unused_columns = True, label_names = None,
        load_best_model_at_end = False, metric_for_best_model = None,
        greater_is_better = None, ignore_data_skip = False, fsdp = '',
        fsdp_min_num_params = 0, fsdp_config = None, tp_size = 0,
        fsdp_transformer_layer_cls_to_wrap = None, accelerator_config = None,
        deepspeed = None, label_smoothing_factor = 0.0, optim = 'adamw_8bit',
        optim_args = None, adafactor = False, group_by_length = False,
        length_column_name = 'length', report_to = None,
        ddp_find_unused_parameters = None, ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None, dataloader_pin_memory = True,
        dataloader_persistent_workers = False, skip_memory_metrics = True,
        use_legacy_prediction_loop = False, push_to_hub = False,
        resume_from_checkpoint = None, hub_model_id = None, hub_strategy = 'every_save',
        hub_token = None, hub_private_repo = None, hub_always_push = False,
        gradient_checkpointing = False, gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False, eval_do_concat_batches = True,
        fp16_backend = 'auto', push_to_hub_model_id = None,
        push_to_hub_organization = None, push_to_hub_token = None, mp_parameters = '',
        auto_find_batch_size = False, full_determinism = False, torchdynamo = None,
        ray_scope = 'last', ddp_timeout = 1800, torch_compile = False,
        torch_compile_backend = None, torch_compile_mode = None,
        include_tokens_per_second = False, include_num_input_tokens_seen = False,
        neftune_noise_alpha = None, optim_target_modules = None,
        batch_eval_metrics = False, eval_on_start = False, use_liger_kernel = False,
        eval_use_gather_object = False, average_tokens_across_devices = False,
        reward_model_path = None, judge = None, max_new_tokens = 64, max_length = 512,
        temperature = 0.9, missing_eos_penalty = None, loss_type = 'sigmoid',
        dataset_num_proc = None, disable_dropout = True, use_vllm = False,
        ds3_gather_for_generation = True,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        **kwargs,
    ):
        """Validate Unsloth-specific constraints, then forward every argument
        to ``OnlineDPOConfig.__init__``.

        Raises:
            FloatingPointError: if ``learning_rate`` < 1e-7 (updates vanish).
            OverflowError: if ``learning_rate`` > 1 (updates explode).
        """
        # Snapshot all named arguments *before* binding any other local, so the
        # whole set can be forwarded to the parent constructor verbatim instead
        # of restating ~130 keyword arguments by hand (the previous duplication
        # was an easy source of drift between signature and super() call).
        config = dict(locals())
        config.pop('self')
        extra = config.pop('kwargs')
        config.pop('vllm_sampling_params')   # Unsloth-only: not a parent parameter
        config.pop('unsloth_num_chunks')     # Unsloth-only: not a parent parameter

        # Reject learning rates that make training a no-op or unstable.
        if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        # With the stock checkpointing settings untouched, skip checkpointing
        # entirely and route any incidental output to a scratch directory.
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            config['output_dir'] = 'unsloth_training_checkpoints'
            config['save_strategy'] = 'no'
        # Default dataset preprocessing to all available cores.
        if dataset_num_proc is None:
            from multiprocessing import cpu_count
            config['dataset_num_proc'] = cpu_count()

        super().__init__(**config, **extra)
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
    pass
| 403 |
+
|
| 404 |
+
class _UnslothOnlineDPOTrainer(Trainer):
|
| 405 |
+
r""""""
|
| 406 |
+
|
| 407 |
+
_tag_names = ["trl", "online-dpo"]
|
| 408 |
+
|
def __init__(
    self,
    model: Union[PreTrainedModel, nn.Module],
    ref_model: Union[PreTrainedModel, nn.Module, None] = None,
    reward_model: Union[PreTrainedModel, nn.Module, None] = None,
    judge: Optional[BasePairwiseJudge] = None,
    args: Optional[OnlineDPOConfig] = None,
    data_collator: Optional[DataCollator] = None,
    train_dataset: Optional[Union[Dataset, IterableDataset, "datasets.Dataset"]] = None,
    eval_dataset: Optional[Union[Dataset, dict[str, Dataset], "datasets.Dataset"]] = None,
    processing_class: Optional[
        Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
    ] = None,
    reward_processing_class: Optional[PreTrainedTokenizerBase] = None,
    peft_config: Optional[dict] = None,
    compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
    callbacks: Optional[list[TrainerCallback]] = None,
    optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
    preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
) -> None:
    """Set up the Online DPO trainer.

    Validates the model / ref-model / scorer combination, builds the data
    collator and generation configuration (vLLM or HF `generate`), then defers
    to ``Trainer.__init__`` for the generic training machinery. Exactly one of
    ``reward_model`` or ``judge`` must be supplied.
    """

    # Unsloth patch: a model carrying a vLLM engine always generates via vLLM.
    if hasattr(model, 'vllm_engine') and hasattr(args, 'use_vllm') and (getattr(args, 'use_vllm', False) == False): args.use_vllm = True
    if ref_model is model:
        raise ValueError(
            "`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
            "same as `model`, either omit the `ref_model` argument or pass `None`."
        )

    self.ref_model = ref_model

    # Exactly one scoring mechanism; prefer the reward model if both are given.
    if reward_model is not None and judge is not None:
        warnings.warn(
            "Both `reward_model` and `judge` are provided. Please choose provide only one of them. "
            "Ignoring `judge` and using `reward_model`.",
            UserWarning,
        )
        judge = None
    elif reward_model is None and judge is None:
        raise ValueError("Either `reward_model` or `judge` must be provided.")

    self.reward_model = reward_model
    self.reward_processing_class = reward_processing_class
    self.judge = judge

    # A judge yields preferences, not scalar scores, so an EOS penalty cannot apply.
    if args.missing_eos_penalty is not None and judge is not None:
        raise ValueError("`missing_eos_penalty` is not supported when `judge` is provided.")

    # NOTE(review): this guard is effectively unreachable — the line above
    # already dereferences `args`, so a None `args` raises AttributeError first.
    if args is None:
        raise ValueError("`args` must be provided.")

    # Check that the processing_class is provided
    if processing_class is None:
        raise ValueError("`processing_class` must be provided.")

    # Convert to PEFT model if peft_config is provided
    # (branch compiled out by the Unsloth generator — kept for parity with TRL)
    if False:
        # Check if PEFT is available
        if not is_peft_available():
            raise ImportError(
                "PEFT is not available and passed `peft_config`. Please install PEFT with "
                "`pip install peft` to use it."
            )

        # If the model is already a PeftModel, we need to merge and unload it.
        # Further information here: https://huggingface.co/docs/trl/dpo_trainer#reference-model-considerations-with-peft
        if isinstance(model, PeftModel):
            model = model.merge_and_unload()

        # Get peft model with the given config
        model = model

    # Disable dropout in the model and reference model
    if args.disable_dropout:
        disable_dropout_in_model(model)
        if self.ref_model is not None:
            disable_dropout_in_model(self.ref_model)

    # Handle the ref_model
    # Usually, the user wants the ref model to be the initial version of the model. When using PEFT, it's easy to
    # get the ref model, as it's just the model with a disabled adapter. When not using PEFT, we need to create
    # the ref model from the model by copying it and disable the gradients and set it in evaluation mode.
    if ref_model is None:  # No ref model provided, the most common case
        if False:  # non-PEFT copy path — compiled out by the generator
            self.ref_model = create_reference_model(model)  # copy, disable gradients, set eval mode
        else:
            self.ref_model = None  # we don't need a ref model here, we can just disable the adapter.
    else:  # rare case, the user provided a ref model
        self.ref_model = ref_model
        self.ref_model.eval()

    # Disable the gradient and set the reward model in eval mode
    if self.reward_model is not None:
        self.reward_model.eval()

    # Define the collator is not provided
    if data_collator is None:
        data_collator = DPODataCollatorWithPadding(pad_token_id=processing_class.pad_token_id)

    # Maximum total (prompt + completion) length used when computing logprobs.
    self.max_length = args.max_length

    # Per-step metric buffers, accumulated during training and flushed on log.
    self.stats = {
        "objective/kl": [],
        "objective/entropy": [],
        "objective/non_score_reward": [],
        "rewards/chosen": [],
        "rewards/rejected": [],
        "rewards/accuracies": [],
        "rewards/margins": [],
        "logps/chosen": [],
        "logps/rejected": [],
        "val/contain_eos_token": [],
        "beta": [],
    }
    if self.reward_model is not None:
        # Score-based metrics only exist when a scalar reward model is used.
        self.stats["objective/rlhf_reward"] = []
        self.stats["objective/scores_margin"] = []
        self.stats["objective/scores"] = []

    if args.use_vllm:
        # n=2: two completions per prompt — the chosen/rejected candidate pair.
        # NOTE(review): `SamplingParams` is not imported at the top of this
        # generated file; presumably injected by Unsloth at runtime — verify.
        self.llm = model.vllm_engine; self._last_loaded_step = 0; self.generation_config = SamplingParams(
            n=2, max_tokens=args.max_new_tokens,
            temperature=args.temperature,
            top_k=50,
            top_p=1.0,
            detokenize=False,**getattr(getattr(args, 'vllm_sampling_params', vLLMSamplingParams()), '_set_kwargs', {}),)
    else:
        self.generation_config = GenerationConfig(
            max_new_tokens=args.max_new_tokens,
            temperature=args.temperature,
            top_k=50,
            top_p=1.0,
            do_sample=True,
            # KV cache is incompatible with gradient checkpointing.
            use_cache=False if args.gradient_checkpointing else True,
        )

    # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
    # input tensor associated with the key "input_ids". However, in Online DPO, the sampled data does not include
    # the "input_ids" key. As a result, the trainer issues the warning: "Could not estimate the number of tokens
    # of the input, floating-point operations will not be computed." To suppress this warning, we set the
    # "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
    # that the warning has already been issued.
    model.warnings_issued["estimate_tokens"] = True

    super().__init__(
        model=model,
        args=args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        processing_class=processing_class,
        compute_metrics=compute_metrics,
        callbacks=callbacks,
        optimizers=optimizers,
        preprocess_logits_for_metrics=preprocess_logits_for_metrics,
    )

    # Add tags for models that have been loaded with the correct transformers version
    if hasattr(self.model, "add_model_tags"):
        self.model.add_model_tags(self._tag_names)

    # β may be a scalar or a per-epoch schedule; resolved by the `beta` property.
    self._beta = args.beta

    # Placed after the super().__init__ because we need self.is_deepspeed_enabled and self.accelerator
    if self.is_deepspeed_enabled:
        if self.reward_model is not None:
            self.reward_model = prepare_deepspeed(
                self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16
            )
        if self.ref_model is not None:
            self.ref_model = prepare_deepspeed(
                self.ref_model, args.per_device_train_batch_size, args.fp16, args.bf16
            )
    else:
        if self.ref_model is not None:
            self.ref_model = self.ref_model.to(self.accelerator.device)
        if self.reward_model is not None:
            self.reward_model = self.reward_model.to(self.accelerator.device)
@property
def beta(self):
    """Current DPO β.

    A scalar β is returned as-is. A list is treated as a per-epoch schedule:
    the β for the current epoch is used, falling back to the last entry once
    the schedule is exhausted.
    """
    if isinstance(self._beta, list):
        # `self.state.epoch` is a float in transformers' TrainerState (it
        # carries fractional progress mid-epoch); indexing a list with a
        # float raises TypeError, so truncate to the completed-epoch index.
        epoch = int(self.state.epoch)
        return self._beta[epoch] if epoch < len(self._beta) else self._beta[-1]
    else:
        return self._beta
@staticmethod
def tokenize_row(feature, is_encoder_decoder: bool, tokenizer: PreTrainedTokenizerBase) -> dict[str, Any]:
    """Tokenize a single row from a DPO specific dataset."""
    if is_encoder_decoder:
        # Encoder-decoder tokenizers handle special tokens themselves.
        tokens = tokenizer(feature["prompt"], add_special_tokens=True)
    else:
        tokens = tokenizer(feature["prompt"], add_special_tokens=False)
        # Prepend BOS unless the tokenizer already emitted one (or has none).
        bos = tokenizer.bos_token_id
        if bos is not None and (not tokens["input_ids"] or tokens["input_ids"][0] != bos):
            tokens["input_ids"] = [bos] + tokens["input_ids"]
            tokens["attention_mask"] = [1] + tokens["attention_mask"]
    # Namespace every key so prompt fields don't collide with completion fields.
    return {f"prompt_{key}": value for key, value in tokens.items()}
# Same as Trainer.get_train_dataloader but skip the "remove_unused_columns".
@wraps(Trainer.get_train_dataloader)
def get_train_dataloader(self) -> DataLoader:
    """Build the training ``DataLoader`` without dropping dataset columns."""
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")

    dataset = self.train_dataset
    loader_kwargs = {
        "batch_size": self._train_batch_size,
        "collate_fn": self.data_collator,
        "num_workers": self.args.dataloader_num_workers,
        "pin_memory": self.args.dataloader_pin_memory,
        "persistent_workers": self.args.dataloader_persistent_workers,
    }

    # Sampling, seeding and drop-last only apply to map-style datasets;
    # iterable datasets control their own iteration order.
    if not isinstance(dataset, torch.utils.data.IterableDataset):
        loader_kwargs["sampler"] = self._get_train_sampler()
        loader_kwargs["drop_last"] = self.args.dataloader_drop_last
        loader_kwargs["worker_init_fn"] = seed_worker
        loader_kwargs["prefetch_factor"] = self.args.dataloader_prefetch_factor

    return self.accelerator.prepare(DataLoader(dataset, **loader_kwargs))
| 634 |
+
|
| 635 |
+
# Same as Trainer.get_eval_dataloader but skip the "remove_unused_columns".
@wraps(Trainer.get_eval_dataloader)
def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]] = None) -> DataLoader:
    """Build (and, with persistent workers, cache) the evaluation dataloader.

    ``eval_dataset`` may be a dataset object, the name of a dataset registered
    in ``self.eval_dataset``, or ``None`` to fall back to ``self.eval_dataset``.
    """
    if eval_dataset is None and self.eval_dataset is None:
        raise ValueError("Trainer: evaluation requires an eval_dataset.")

    # If we have persistent workers, don't do a fork bomb especially as eval
    # datasets don't change during training: reuse the dataloader built earlier.
    cache_key = eval_dataset if isinstance(eval_dataset, str) else "eval"
    cached = getattr(self, "_eval_dataloaders", {})
    if self.args.dataloader_persistent_workers and cache_key in cached:
        return self.accelerator.prepare(cached[cache_key])

    # Resolve the actual dataset object to evaluate on.
    if isinstance(eval_dataset, str):
        dataset = self.eval_dataset[eval_dataset]
    elif eval_dataset is not None:
        dataset = eval_dataset
    else:
        dataset = self.eval_dataset

    params = dict(
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
        persistent_workers=self.args.dataloader_persistent_workers,
    )

    # Sampler / drop_last / prefetch only apply to map-style datasets.
    if not isinstance(dataset, torch.utils.data.IterableDataset):
        params.update(
            sampler=self._get_eval_sampler(dataset),
            drop_last=self.args.dataloader_drop_last,
            prefetch_factor=self.args.dataloader_prefetch_factor,
        )

    # accelerator.free_memory() will destroy the references, so we need to
    # store the non-prepared version for reuse.
    dataloader = DataLoader(dataset, **params)
    if self.args.dataloader_persistent_workers:
        if not hasattr(self, "_eval_dataloaders"):
            self._eval_dataloaders = {}
        self._eval_dataloaders[cache_key] = dataloader

    return self.accelerator.prepare(dataloader)
|
| 683 |
+
|
| 684 |
+
def _generate_vllm(self, model, prompts):
    """Sample two completions per prompt with vLLM.

    Returns ``(prompt_ids, prompt_mask, completion_ids, completion_mask)`` as
    tensors on the accelerator device: prompts are left-padded, completions
    are right-padded to ``generation_config.max_tokens``.
    """
    eos_token_id = self.processing_class.eos_token_id
    pad_token_id = self.processing_class.pad_token_id

    # Load the latest weights
    # NOTE(review): the upstream weight-sync code is patched out here; the
    # Unsloth path instead passes a freshly loaded LoRA adapter to vLLM below.

    pass

    pass

    # Chat-formatted prompts go through `chat`, plain strings through
    # `generate`; both sample with the current LoRA weights.
    if is_conversational({"prompt": prompts[0]}):
        outputs = self.llm.chat(prompts, self.generation_config, use_tqdm=False, lora_request = self.model.load_lora('online_dpo_trainer_lora_model', load_tensors = True))
    else:
        outputs = self.llm.generate(prompts, self.generation_config, use_tqdm=False, lora_request = self.model.load_lora('online_dpo_trainer_lora_model', load_tensors = True))

    # `i` (the sample index) is the OUTER loop, so the first half of each list
    # holds every prompt's sample 0 and the second half every prompt's sample 1
    # — the layout `training_step` relies on when splitting at `batch_size`.
    completion_ids = [list(output.outputs[i].token_ids) for i in range(2) for output in outputs]
    prompt_ids = [list(output.prompt_token_ids) for _ in range(2) for output in outputs]

    # Create mask and pad the prompt (left) and completion (right)
    max_prompt_length = max(len(ids) for ids in prompt_ids)
    prompt_mask = [[0] * (max_prompt_length - len(ids)) + [1] * len(ids) for ids in prompt_ids]
    prompt_ids = [[pad_token_id] * (max_prompt_length - len(ids)) + ids for ids in prompt_ids]
    max_tokens = self.generation_config.max_tokens
    completion_mask = [[1] * len(ids) + [0] * (max_tokens - len(ids)) for ids in completion_ids]
    # Append an EOS to completions that stopped early without emitting one.
    # NOTE(review): the mask above is built from the pre-append length, so the
    # appended EOS ends up masked out — confirm this matches upstream intent.
    completion_ids = [
        ids + [eos_token_id] if ids[-1] != eos_token_id and len(ids) < max_tokens else ids
        for ids in completion_ids
    ]
    completion_ids = [ids + [pad_token_id] * (max_tokens - len(ids)) for ids in completion_ids]

    # Convert to tensors on the training device
    prompt_ids = torch.tensor(prompt_ids, device=self.accelerator.device)
    prompt_mask = torch.tensor(prompt_mask, device=self.accelerator.device)
    completion_ids = torch.tensor(completion_ids, device=self.accelerator.device)
    completion_mask = torch.tensor(completion_mask, device=self.accelerator.device)

    return prompt_ids, prompt_mask, completion_ids, completion_mask
|
| 721 |
+
|
| 722 |
+
def _generate(self, model, prompts):
    """Sample two completions per prompt with HF ``generate``.

    Returns ``(prompt_ids, prompt_mask, completion_ids, completion_mask)``;
    each prompt is duplicated, so rows ``[0, B)`` and ``[B, 2B)`` hold the two
    independent samples for the same prompts.
    """
    tokenizer = self.processing_class
    eos_token_id = tokenizer.eos_token_id
    pad_token_id = tokenizer.pad_token_id

    # Apply chat template and tokenize the input. We do this on-the-fly to
    # enable the use of reward models and policies with different
    # tokenizers / chat templates.
    rows = [maybe_apply_chat_template({"prompt": p}, tokenizer) for p in prompts]
    rows = [self.tokenize_row(r, model.config.is_encoder_decoder, tokenizer) for r in rows]
    batch = self._prepare_inputs(self.data_collator(rows))

    # Sample 2 completions per prompt of size `max_new_tokens` from the model.
    prompt_ids = batch["prompt_input_ids"].repeat(2, 1)
    prompt_mask = batch["prompt_attention_mask"].repeat(2, 1)
    with unwrap_model_for_generation(
        model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation
    ) as unwrapped_model:
        generated = unwrapped_model.generate(
            input_ids=prompt_ids,
            attention_mask=prompt_mask,
            generation_config=self.generation_config,
        )

    # Strip the prompt prefix, then let `truncate_right` pad out everything
    # past the first EOS in each completion.
    completion_ids = generated[:, prompt_ids.size(1) :]
    completion_ids, completion_mask = truncate_right(completion_ids, eos_token_id, pad_token_id)

    return prompt_ids, prompt_mask, completion_ids, completion_mask
|
| 750 |
+
|
| 751 |
+
def _forward(self, model, prompt_ids, prompt_mask, completion_ids, completion_mask):
    """Return per-token log-probabilities of the completions under ``model``.

    The prompt is truncated from the left so that prompt + completion fits in
    ``self.max_length`` (avoids OOM on long prompts).
    """
    overflow = max(prompt_ids.size(1) + completion_ids.size(1) - self.max_length, 0)
    prompt_ids = prompt_ids[:, overflow:]
    prompt_mask = prompt_mask[:, overflow:]

    # Run the model over the concatenated prompt + completion.
    full_ids = torch.cat((prompt_ids, completion_ids), dim=1)
    full_mask = torch.cat((prompt_mask, completion_mask), dim=1)
    output = model(full_ids, attention_mask=full_mask)

    # Shift by one: the logits at position t predict token t+1, so the
    # completion tokens are scored by logits[prompt_len - 1 : -1].
    shifted_logits = output.logits[:, prompt_ids.size(1) - 1 : -1]

    # Gather each completion token's log-probability.
    per_token = torch.take_along_dim(
        shifted_logits.log_softmax(dim=-1), completion_ids.unsqueeze(-1), dim=2
    )
    return per_token.squeeze(-1)
|
| 772 |
+
|
| 773 |
+
def training_step(
    self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None
) -> torch.Tensor:
    """Run one Online-DPO optimization step.

    Generates two completions per prompt, ranks them with the judge or reward
    model, computes the DPO loss on the (chosen, rejected) pair, logs metrics
    into ``self.stats``, backpropagates, and returns the detached loss scaled
    by the gradient-accumulation factor (as ``Trainer`` expects).
    """
    model.train()

    prompts = inputs["prompt"]
    batch_size = len(prompts)

    if self.args.use_vllm:
        prompt_ids, prompt_mask, completion_ids, completion_mask = self._generate_vllm(model, prompts)
    else:
        prompt_ids, prompt_mask, completion_ids, completion_mask = self._generate(model, prompts)

    contain_eos_token = torch.any(completion_ids == self.processing_class.eos_token_id, dim=-1)

    logprobs = self._forward(model, prompt_ids, prompt_mask, completion_ids, completion_mask)
    with torch.no_grad():
        if self.ref_model is not None:
            ref_logprobs = self._forward(self.ref_model, prompt_ids, prompt_mask, completion_ids, completion_mask)
        else:  # peft case: we just need to disable the adapter
            with self.model.disable_adapter():
                ref_logprobs = self._forward(self.model, prompt_ids, prompt_mask, completion_ids, completion_mask)

    # Decode the completions, and format them if the input is conversational
    device = logprobs.device
    completions = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True)
    if is_conversational({"prompt": prompts[0]}):
        completions = [[{"role": "assistant", "content": completion}] for completion in completions]

    # Get the reward from the reward model or judge
    if self.judge is not None:
        # Once formatted, conversational data may contain special tokens (such as <|im_start|>) that are not
        # directly understandable by the judge and could alter its judgment. To avoid this and make the judge
        # independent of the model's chat template, we use the raw conversation data, and apply our own chat
        # template to it.
        if is_conversational({"prompt": prompts[0]}):
            environment = jinja2.Environment()
            template = environment.from_string(SIMPLE_CHAT_TEMPLATE)
            prompts = [template.render(messages=prompt) for prompt in prompts]
            completions = [template.render(messages=completion) for completion in completions]

        ranks_of_first_completion = self.judge.judge(
            prompts, list(zip(completions[:batch_size], completions[batch_size:]))
        )

        # convert ranks to a True/False mask:
        # when rank == 0, it means the first completion is the best
        # when rank == 1, it means the second completion is the best
        mask = torch.tensor([rank == 0 for rank in ranks_of_first_completion], device=device)
    else:
        # The reward model may not have the same chat template or tokenizer as the model, so we need to use the
        # raw data (string), apply the chat template (if needed), and tokenize it with the reward processing class.
        prompts = 2 * prompts  # repeat the prompt: [prompt0, prompt1] -> [prompt0, prompt1, prompt0, prompt1]
        if is_conversational({"prompt": prompts[0]}):
            examples = [{"prompt": p, "completion": c} for p, c in zip(prompts, completions)]
            examples = [apply_chat_template(example, self.reward_processing_class) for example in examples]
            prompts = [example["prompt"] for example in examples]
            completions = [example["completion"] for example in examples]

        # Tokenize the prompts (left-padded so the completion start aligns)
        prompts_ids = self.reward_processing_class(
            prompts, padding=True, return_tensors="pt", padding_side="left"
        )["input_ids"].to(device)
        context_length = prompts_ids.shape[1]

        # Tokenize the completions
        completions_ids = self.reward_processing_class(
            completions, padding=True, return_tensors="pt", padding_side="right"
        )["input_ids"].to(device)

        # Concatenate the prompts and completions and get the reward
        prompt_completion_ids = torch.cat((prompts_ids, completions_ids), dim=1)
        with torch.inference_mode():
            _, scores, _ = get_reward(
                self.reward_model, prompt_completion_ids, self.reward_processing_class.pad_token_id, context_length
            )

        # Filter completion. Ensure that the sample contains stop_token_id
        # Completions not passing that filter will receive a lower score.
        if self.args.missing_eos_penalty is not None:
            scores[~contain_eos_token] -= self.args.missing_eos_penalty

        # Split the scores in 2 (the prompts of the first half are the same as the second half)
        first_half, second_half = scores.split(batch_size)

        # Get the indices of the chosen and rejected examples
        mask = first_half >= second_half

    batch_range = torch.arange(batch_size, device=device)
    chosen_indices = batch_range + (~mask * batch_size)
    rejected_indices = batch_range + (mask * batch_size)

    # Build tensor so that the first half is the chosen examples and the second half the rejected examples
    cr_indices = torch.cat((chosen_indices, rejected_indices), dim=0)  # cr = chosen and rejected
    cr_logprobs = logprobs[cr_indices]
    cr_ref_logprobs = ref_logprobs[cr_indices]

    # mask out the padding tokens
    padding_mask = ~completion_mask.bool()
    cr_padding_mask = padding_mask[cr_indices]

    cr_logprobs_sum = (cr_logprobs * ~cr_padding_mask).sum(1)
    cr_ref_logprobs_sum = (cr_ref_logprobs * ~cr_padding_mask).sum(1)

    # Split the chosen and rejected examples
    chosen_logprobs_sum, rejected_logprobs_sum = torch.split(cr_logprobs_sum, batch_size)
    chosen_ref_logprobs_sum, rejected_ref_logprobs_sum = torch.split(cr_ref_logprobs_sum, batch_size)
    pi_logratios = chosen_logprobs_sum - rejected_logprobs_sum
    ref_logratios = chosen_ref_logprobs_sum - rejected_ref_logprobs_sum

    logits = pi_logratios - ref_logratios

    if self.args.loss_type == "sigmoid":
        losses = -F.logsigmoid(self.beta * logits)
    elif self.args.loss_type == "ipo":
        losses = (logits - 1 / (2 * self.beta)) ** 2
    else:
        # Fixed: was `self.loss_type`, which is not an attribute of the trainer
        # (the value lives on the config) and would raise AttributeError here.
        raise NotImplementedError(f"invalid loss type {self.args.loss_type}")

    loss = losses.mean()

    # Log everything
    if self.reward_model is not None:
        scores_margin = scores[chosen_indices] - scores[rejected_indices]
        self.stats["objective/scores_margin"].append(
            self.accelerator.gather_for_metrics(scores_margin.mean()).mean().item()
        )
        self.stats["objective/scores"].append(self.accelerator.gather_for_metrics(scores.mean()).mean().item())
    self.stats["val/contain_eos_token"].append(contain_eos_token.float().mean().item())
    self.stats["logps/chosen"].append(self.accelerator.gather_for_metrics(chosen_logprobs_sum).mean().item())
    self.stats["logps/rejected"].append(self.accelerator.gather_for_metrics(rejected_logprobs_sum).mean().item())

    kl = logprobs - ref_logprobs
    mean_kl = kl.sum(1).mean()
    self.stats["objective/kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item())
    non_score_reward = (-self.beta * kl).sum(1)
    mean_non_score_reward = non_score_reward.mean()
    self.stats["objective/non_score_reward"].append(
        self.accelerator.gather_for_metrics(mean_non_score_reward).mean().item()
    )
    if self.reward_model is not None:
        rlhf_reward = scores + non_score_reward
        self.stats["objective/rlhf_reward"].append(self.accelerator.gather_for_metrics(rlhf_reward).mean().item())
    mean_entropy = -logprobs.sum(1).mean()
    self.stats["objective/entropy"].append(self.accelerator.gather_for_metrics(mean_entropy).mean().item())
    chosen_rewards = self.beta * (chosen_logprobs_sum - chosen_ref_logprobs_sum)
    gathered_chosen_rewards = self.accelerator.gather_for_metrics(chosen_rewards)
    self.stats["rewards/chosen"].append(gathered_chosen_rewards.mean().item())
    rejected_rewards = self.beta * (rejected_logprobs_sum - rejected_ref_logprobs_sum)
    gathered_rejected_rewards = self.accelerator.gather_for_metrics(rejected_rewards)
    self.stats["rewards/rejected"].append(gathered_rejected_rewards.mean().item())
    margin = gathered_chosen_rewards - gathered_rejected_rewards
    self.stats["rewards/margins"].append(margin.mean().item())
    accuracy = margin > 0
    self.stats["rewards/accuracies"].append(accuracy.float().mean().item())
    self.stats["beta"].append(self.beta)

    if (
        self.args.torch_empty_cache_steps is not None
        and self.state.global_step % self.args.torch_empty_cache_steps == 0
    ):
        empty_cache()

    kwargs = {}

    # For LOMO optimizers you need to explicitly use the learning rate
    if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
        kwargs["learning_rate"] = self._get_learning_rate()

    if self.args.n_gpu > 1:
        loss = loss.mean()  # mean() to average on multi-gpu parallel training

    if self.use_apex:
        with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        self.accelerator.backward(loss, **kwargs)

    return loss.detach() / self.args.gradient_accumulation_steps
|
| 952 |
+
|
| 953 |
+
# Same as Trainer._maybe_log_save_evaluate but log our metrics
# start_time defaults to None to allow compatibility with transformers<=4.46
def _maybe_log_save_evaluate(self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time=None):
    """Log the buffered online-DPO stats alongside the standard training logs,
    then run the usual evaluate/save control-flow from ``Trainer``."""
    if self.control.should_log and self.state.global_step > self._globalstep_last_logged:
        logs: dict[str, float] = {}

        # all_gather + mean() to get average loss over all processes
        tr_loss_scalar = self._nested_gather(tr_loss).mean().item()

        # reset tr_loss to zero in place, so the caller's accumulator is cleared
        tr_loss -= tr_loss

        logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
        if grad_norm is not None:
            logs["grad_norm"] = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm
        logs["learning_rate"] = self._get_learning_rate()

        # Add our metrics: mean of each stat buffered since the last log step.
        # NOTE(review): assumes every stats list is non-empty at log time.
        for key, val in self.stats.items():
            logs[key] = sum(val) / len(val)
        self.stats = {key: [] for key in self.stats}  # reset stats

        self._total_loss_scalar += tr_loss_scalar
        self._globalstep_last_logged = self.state.global_step
        self.store_flos()

        # transformers >= 4.47 added a `start_time` argument to `log`
        if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
            self.log(logs, start_time)
        else:  # transformers<=4.46
            self.log(logs)

    metrics = None
    if self.control.should_evaluate:
        metrics = self._evaluate(trial, ignore_keys_for_eval)
        is_new_best_metric = self._determine_best_metric(metrics=metrics, trial=trial)

        # The "best" save strategy only saves when the tracked metric improved
        if self.args.save_strategy == "best":
            self.control.should_save = is_new_best_metric

    if self.control.should_save:
        self._save_checkpoint(model, trial)
        self.control = self.callback_handler.on_save(self.args, self.state, self.control)
|
| 995 |
+
|
| 996 |
+
# Copy-pasted from transformers.Trainer to maintain compatibility with earlier versions.
# This can be removed once the minimum transformers version is updated to 4.47.
# Refer to https://github.com/huggingface/trl/pull/2288 for more details.
def _determine_best_metric(self, metrics, trial):
    """
    Determine if the model should be saved based on the evaluation metrics.

    If args.metric_for_best_model is not set, the loss is used.

    Returns:
        bool: True if a new best metric was found, else False
    """
    # Nothing to track without a configured metric.
    if self.args.metric_for_best_model is None:
        return False

    metric_key = self.args.metric_for_best_model
    if not metric_key.startswith("eval_"):
        metric_key = f"eval_{metric_key}"

    try:
        candidate = metrics[metric_key]
    except KeyError as exc:
        raise KeyError(
            f"The `metric_for_best_model` training argument is set to '{metric_key}', which is not found in the evaluation metrics. "
            f"The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments."
        ) from exc

    improves = np.greater if self.args.greater_is_better else np.less

    # Seed the comparison so the first evaluation always counts as "best".
    if self.state.best_metric is None:
        self.state.best_metric = float("-inf") if self.args.greater_is_better else float("inf")

    if not improves(candidate, self.state.best_metric):
        return False

    # Record the new best metric and the checkpoint that will hold it.
    run_dir = self._get_output_dir(trial=trial)
    checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
    self.state.best_metric = candidate
    self.state.best_model_checkpoint = os.path.join(run_dir, checkpoint_folder)
    return True
|
| 1037 |
+
|
| 1038 |
+
def create_model_card(
    self,
    model_name: Optional[str] = None,
    dataset_name: Optional[str] = None,
    tags: Union[str, list[str], None] = None,
):
    """
    Creates a draft of a model card using the information available to the `Trainer`.

    Args:
        model_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the model.
        dataset_name (`str` or `None`, *optional*, defaults to `None`):
            Name of the dataset used for training.
        tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
            Tags to be associated with the model card.
    """
    # Only the main process writes the card.
    if not self.is_world_process_zero():
        return

    # A `_name_or_path` that is a local directory is not a hub id, so only
    # keep it as the base model when it is not a path on disk.
    if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
        base_model = self.model.config._name_or_path
    else:
        base_model = None

    # Normalize tags to a list (a bare string becomes a one-element list).
    tags = tags or []
    if isinstance(tags, str):
        tags = [tags]

    # Tag models whose config went through Unsloth patching.
    if hasattr(self.model.config, "unsloth_version"):
        tags.append("unsloth")

    # BibTeX citation for the Online DPO paper (non-raw string; the only
    # escape used is \' which renders as a plain apostrophe).
    citation = textwrap.dedent("""\
@article{guo2024direct,
title = {{Direct Language Model Alignment from Online AI Feedback}},
author = {Shangmin Guo and Biao Zhang and Tianlin Liu and Tianqi Liu and Misha Khalman and Felipe Llinares and Alexandre Ram{\'{e}} and Thomas Mesnard and Yao Zhao and Bilal Piot and Johan Ferret and Mathieu Blondel},
year = 2024,
eprint = {arXiv:2402.04792}
}""")

    model_card = generate_model_card(
        base_model=base_model,
        model_name=model_name,
        hub_model_id=self.hub_model_id,
        dataset_name=dataset_name,
        tags=tags,
        # Only attach a W&B / Comet URL when a run is actually active.
        wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
        comet_url=get_comet_experiment_url(),
        trainer_name="Online DPO",
        trainer_citation=citation,
        paper_title="Direct Language Model Alignment from Online AI Feedback",
        paper_id="2402.04792",
    )
    model_card.save(os.path.join(self.args.output_dir, "README.md"))
|
| 1092 |
+
class UnslothOnlineDPOTrainer(_UnslothOnlineDPOTrainer):
|
| 1093 |
+
"""
|
| 1094 |
+
|
| 1095 |
+
Initialize OnlineDPOTrainer.
|
| 1096 |
+
|
| 1097 |
+
Args:
|
| 1098 |
+
model (`transformers.PreTrainedModel` or `torch.nn.Module`):
|
| 1099 |
+
The model to train, preferably an `AutoModelForCausalLM`.
|
| 1100 |
+
ref_model (`transformers.PreTrainedModel` or `torch.nn.Module` or `None`):
|
| 1101 |
+
The reference model to use for training. If None is specified, the reference model will be created from
|
| 1102 |
+
the model.
|
| 1103 |
+
reward_model (`transformers.PreTrainedModel` or `torch.nn.Module` or `None`):
|
| 1104 |
+
The reward model to score completions with, preferably an `AutoModelForSequenceClassification`.
|
| 1105 |
+
judge (`BasePairwiseJudge`):
|
| 1106 |
+
The judge to use for pairwise comparison of model completions.
|
| 1107 |
+
args (`OnlineDPOConfig`):
|
| 1108 |
+
The online DPO config arguments to use for training.
|
| 1109 |
+
data_collator (`transformers.DataCollator`):
|
| 1110 |
+
The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
|
| 1111 |
+
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
|
| 1112 |
+
train_dataset (`datasets.Dataset`):
|
| 1113 |
+
The dataset to use for training.
|
| 1114 |
+
eval_dataset (`datasets.Dataset`):
|
| 1115 |
+
The dataset to use for evaluation.
|
| 1116 |
+
processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
|
| 1117 |
+
Processing class used to process the data. If provided, will be used to automatically process the inputs
|
| 1118 |
+
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
|
| 1119 |
+
reuse the fine-tuned model.
|
| 1120 |
+
peft_config (`dict`):
|
| 1121 |
+
The peft config to use for training.
|
| 1122 |
+
compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
|
| 1123 |
+
The function to use to compute the metrics. Must take a `EvalPrediction` and return
|
| 1124 |
+
a dictionary string to metric values.
|
| 1125 |
+
callbacks (`list[transformers.TrainerCallback]`):
|
| 1126 |
+
The callbacks to use for training.
|
| 1127 |
+
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
|
| 1128 |
+
The optimizer and scheduler to use for training.
|
| 1129 |
+
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
|
| 1130 |
+
The function to use to preprocess the logits before computing the metrics.
|
| 1131 |
+
|
| 1132 |
+
"""
|
| 1133 |
+
def __init__(
|
| 1134 |
+
self,
|
| 1135 |
+
model,
|
| 1136 |
+
ref_model = None,
|
| 1137 |
+
reward_model = None,
|
| 1138 |
+
judge = None,
|
| 1139 |
+
args = None,
|
| 1140 |
+
data_collator = None,
|
| 1141 |
+
train_dataset = None,
|
| 1142 |
+
eval_dataset = None,
|
| 1143 |
+
processing_class = None,
|
| 1144 |
+
reward_processing_class = None,
|
| 1145 |
+
peft_config = None,
|
| 1146 |
+
compute_metrics = None,
|
| 1147 |
+
callbacks = None,
|
| 1148 |
+
preprocess_logits_for_metrics = None,
|
| 1149 |
+
**kwargs
|
| 1150 |
+
):
|
| 1151 |
+
if args is None: args = UnslothOnlineDPOConfig()
|
| 1152 |
+
use_bf16 = getattr(args, 'bf16', False)
|
| 1153 |
+
use_fp16 = getattr(args, 'fp16', False)
|
| 1154 |
+
force_float32 = False
|
| 1155 |
+
if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
|
| 1156 |
+
print('Unsloth: Switching to float32 training since model cannot work with float16')
|
| 1157 |
+
force_float32 = True
|
| 1158 |
+
mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
|
| 1159 |
+
dtype = getattr(model.config, 'torch_dtype', None)
|
| 1160 |
+
if dtype is None: dtype = model.get_input_embeddings().dtype
|
| 1161 |
+
from unsloth_zoo.utils import _get_dtype
|
| 1162 |
+
dtype = _get_dtype(dtype)
|
| 1163 |
+
float16 = dtype == torch.float16
|
| 1164 |
+
if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
|
| 1165 |
+
if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
|
| 1166 |
+
if force_float32:
|
| 1167 |
+
args.fp16 = False
|
| 1168 |
+
args.bf16 = False
|
| 1169 |
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
|
| 1170 |
+
elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
|
| 1171 |
+
args.fp16 = float16
|
| 1172 |
+
args.bf16 = not float16
|
| 1173 |
+
os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
|
| 1174 |
+
if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
|
| 1175 |
+
args.eval_strategy = 'steps'
|
| 1176 |
+
if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
|
| 1177 |
+
ga_steps = getattr(args, 'gradient_accumulation_steps', None)
|
| 1178 |
+
if ga_steps is not None and ga_steps > 1:
|
| 1179 |
+
from transformers import __version__ as transformers_version
|
| 1180 |
+
if Version(transformers_version) <= Version('4.45.2'):
|
| 1181 |
+
print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
|
| 1182 |
+
'`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
|
| 1183 |
+
if getattr(args, 'eval_strategy', 'no') != 'no':
|
| 1184 |
+
eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
|
| 1185 |
+
if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
|
| 1186 |
+
if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
|
| 1187 |
+
fp16_full_eval = getattr(args, 'fp16_full_eval', False)
|
| 1188 |
+
bf16_full_eval = getattr(args, 'bf16_full_eval', False)
|
| 1189 |
+
if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
|
| 1190 |
+
if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
|
| 1191 |
+
if force_float32:
|
| 1192 |
+
args.bf16_full_eval = False
|
| 1193 |
+
args.fp16_full_eval = False
|
| 1194 |
+
elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
|
| 1195 |
+
args.bf16_full_eval = True
|
| 1196 |
+
args.fp16_full_eval = False
|
| 1197 |
+
elif not bf16_full_eval and not fp16_full_eval:
|
| 1198 |
+
args.bf16_full_eval = args.bf16
|
| 1199 |
+
args.fp16_full_eval = args.fp16
|
| 1200 |
+
_output_logits = False
|
| 1201 |
+
if locals().get('compute_metrics', None) is not None: _output_logits = True
|
| 1202 |
+
if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
|
| 1203 |
+
if _output_logits:
|
| 1204 |
+
os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
|
| 1205 |
+
if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
|
| 1206 |
+
pass
|
| 1207 |
+
else:
|
| 1208 |
+
model_max_seq_length = getattr(model, 'max_seq_length', None)
|
| 1209 |
+
args_max_seq_length = getattr(args, 'max_seq_length', None)
|
| 1210 |
+
if args_max_seq_length is None and model_max_seq_length is not None:
|
| 1211 |
+
max_seq_length = model.max_seq_length
|
| 1212 |
+
if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
|
| 1213 |
+
if model is not None and hasattr(model, 'for_training'):
|
| 1214 |
+
model.for_training()
|
| 1215 |
+
if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
|
| 1216 |
+
if 'processing_class' in locals():
|
| 1217 |
+
if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
|
| 1218 |
+
if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
|
| 1219 |
+
__tokenizer = processing_class if 'processing_class' in locals() else tokenizer
|
| 1220 |
+
from unsloth_zoo.vision_utils import UnslothVisionDataCollator
|
| 1221 |
+
if not isinstance(data_collator, UnslothVisionDataCollator):
|
| 1222 |
+
if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
|
| 1223 |
+
data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
|
| 1224 |
+
elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
|
| 1225 |
+
data_collator = DataCollatorForSeq2Seq(__tokenizer)
|
| 1226 |
+
else:
|
| 1227 |
+
if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
|
| 1228 |
+
if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
|
| 1229 |
+
if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
|
| 1230 |
+
if not isinstance(data_collator, UnslothVisionDataCollator):
|
| 1231 |
+
if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
|
| 1232 |
+
if isinstance(data_collator, DataCollatorForSeq2Seq):
|
| 1233 |
+
data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
|
| 1234 |
+
else:
|
| 1235 |
+
data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
|
| 1236 |
+
other_metrics = []
|
| 1237 |
+
|
| 1238 |
+
from unsloth_zoo.logging_utils import PatchRLStatistics
|
| 1239 |
+
PatchRLStatistics('online_dpo_trainer', other_metrics)
|
| 1240 |
+
|
| 1241 |
+
super().__init__(
|
| 1242 |
+
model = model,
|
| 1243 |
+
ref_model = ref_model,
|
| 1244 |
+
reward_model = reward_model,
|
| 1245 |
+
judge = judge,
|
| 1246 |
+
args = args,
|
| 1247 |
+
data_collator = data_collator,
|
| 1248 |
+
train_dataset = train_dataset,
|
| 1249 |
+
eval_dataset = eval_dataset,
|
| 1250 |
+
processing_class = processing_class,
|
| 1251 |
+
reward_processing_class = reward_processing_class,
|
| 1252 |
+
peft_config = peft_config,
|
| 1253 |
+
compute_metrics = compute_metrics,
|
| 1254 |
+
callbacks = callbacks,
|
| 1255 |
+
preprocess_logits_for_metrics = preprocess_logits_for_metrics,**kwargs)
|
| 1256 |
+
if hasattr(self, 'neftune_hook_handle'):
|
| 1257 |
+
self.neftune_hook_handle.remove()
|
| 1258 |
+
if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
|
| 1259 |
+
if getattr(args, 'neftune_noise_alpha', None) is not None:
|
| 1260 |
+
model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
|
| 1261 |
+
pass
|
| 1262 |
+
|
| 1263 |
+
pass
|
unsloth_compiled_cache/UnslothPPOTrainer.py
ADDED
|
@@ -0,0 +1,1253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2025.4.1
|
| 3 |
+
2025.4.1
|
| 4 |
+
4.51.3
|
| 5 |
+
0.15.2
|
| 6 |
+
__UNSLOTH_VERSIONING__
|
| 7 |
+
"""
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from trl.trainer.ppo_trainer import (Accelerator, BaseImageProcessor, CallbackHandler, DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK, DataCollatorWithPadding, DataLoader, Dataset, ExportableState, FeatureExtractionMixin, GenerationConfig, INVALID_LOGPROB, OnlineTrainerState, Optional, PPOConfig, PPOTrainer, PeftConfig, PeftModel, PolicyAndValueWrapper, PreTrainedTokenizerBase, PrinterCallback, ProcessorMixin, Trainer, TrainerCallback, TrainerControl, Union, batch_generation, broadcast, contextmanager, create_reference_model, defaultdict, disable_dropout_in_model, exact_div, first_true_indices, forward, gather_object, gc, generate_model_card, get_comet_experiment_url, get_peft_model, get_reporting_integration_callbacks, get_reward, is_peft_available, is_wandb_available, log_table_to_comet_experiment, masked_mean, masked_whiten, math, nn, np, nullcontext, os, pd, peft_module_casting_to_bf16, prepare_deepspeed, print_rich_table, textwrap, time, torch, truncate_response, unwrap_model_for_generation)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import *
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from packaging.version import Version
|
| 19 |
+
import torch
|
| 20 |
+
import numpy as np
|
| 21 |
+
from contextlib import nullcontext
|
| 22 |
+
from torch.nn import functional as F
|
| 23 |
+
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
|
| 24 |
+
|
| 25 |
+
torch_compile_options = {
|
| 26 |
+
"epilogue_fusion" : True,
|
| 27 |
+
"max_autotune" : False,
|
| 28 |
+
"shape_padding" : True,
|
| 29 |
+
"trace.enabled" : False,
|
| 30 |
+
"triton.cudagraphs" : False,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
|
| 34 |
+
def selective_log_softmax(logits, index):
|
| 35 |
+
logits = logits.to(torch.float32)
|
| 36 |
+
selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
|
| 37 |
+
# loop to reduce peak mem consumption
|
| 38 |
+
# logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
|
| 39 |
+
logsumexp_values = torch.logsumexp(logits, dim = -1)
|
| 40 |
+
per_token_logps = selected_logits - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
|
| 41 |
+
return per_token_logps
|
| 42 |
+
@dataclass
|
| 43 |
+
class UnslothPPOConfig(PPOConfig):
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
Configuration class for the [`PPOTrainer`].
|
| 47 |
+
|
| 48 |
+
Using [`~transformers.HfArgumentParser`] we can turn this class into
|
| 49 |
+
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
|
| 50 |
+
command line.
|
| 51 |
+
|
| 52 |
+
Parameters:
|
| 53 |
+
exp_name (`str`, *optional*, defaults to `os.path.basename(__file__)[:-3]`):
|
| 54 |
+
Name of this experiment.
|
| 55 |
+
reward_model_path (`str`, *optional*, defaults to `"EleutherAI/pythia-160m"`):
|
| 56 |
+
Path to the reward model.
|
| 57 |
+
model_adapter_name (`str` or `None`, *optional*, defaults to `None`):
|
| 58 |
+
Name of the train target PEFT adapter, when using LoRA with multiple adapters.
|
| 59 |
+
ref_adapter_name (`str` or `None`, *optional*, defaults to `None`):
|
| 60 |
+
Name of the reference PEFT adapter, when using LoRA with multiple adapters.
|
| 61 |
+
num_ppo_epochs (`int`, *optional*, defaults to `4`):
|
| 62 |
+
Number of epochs to train.
|
| 63 |
+
whiten_rewards (`bool`, *optional*, defaults to `False`):
|
| 64 |
+
Whether to whiten the rewards.
|
| 65 |
+
kl_coef (`float`, *optional*, defaults to `0.05`):
|
| 66 |
+
KL coefficient.
|
| 67 |
+
cliprange (`float`, *optional*, defaults to `0.2`):
|
| 68 |
+
Clip range.
|
| 69 |
+
vf_coef (`float`, *optional*, defaults to `0.1`):
|
| 70 |
+
Value function coefficient.
|
| 71 |
+
cliprange_value (`float`, *optional*, defaults to `0.2`):
|
| 72 |
+
Clip range for the value function.
|
| 73 |
+
gamma (`float`, *optional*, defaults to `1.0`):
|
| 74 |
+
Discount factor.
|
| 75 |
+
lam (`float`, *optional*, defaults to `0.95`):
|
| 76 |
+
Lambda value for GAE.
|
| 77 |
+
ds3_gather_for_generation (`bool`, *optional*, defaults to `True`):
|
| 78 |
+
This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation,
|
| 79 |
+
improving generation speed. However, disabling this option allows training models that exceed the VRAM
|
| 80 |
+
capacity of a single GPU, albeit at the cost of slower generation.
|
| 81 |
+
|
| 82 |
+
"""
|
| 83 |
+
vllm_sampling_params: Optional[Any] = field(
|
| 84 |
+
default = None,
|
| 85 |
+
metadata = {'help': 'vLLM SamplingParams'},
|
| 86 |
+
)
|
| 87 |
+
unsloth_num_chunks : Optional[int] = field(
|
| 88 |
+
default = -1,
|
| 89 |
+
metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
|
| 90 |
+
)
|
| 91 |
+
def __init__(
|
| 92 |
+
self,
|
| 93 |
+
output_dir = None,
|
| 94 |
+
overwrite_output_dir = None,
|
| 95 |
+
do_train = False,
|
| 96 |
+
do_eval = False,
|
| 97 |
+
do_predict = False,
|
| 98 |
+
eval_strategy = 'no',
|
| 99 |
+
prediction_loss_only = False,
|
| 100 |
+
per_device_train_batch_size = 4,
|
| 101 |
+
per_device_eval_batch_size = 4,
|
| 102 |
+
per_gpu_train_batch_size = None,
|
| 103 |
+
per_gpu_eval_batch_size = None,
|
| 104 |
+
gradient_accumulation_steps = 2,
|
| 105 |
+
eval_accumulation_steps = 2,
|
| 106 |
+
eval_delay = 0,
|
| 107 |
+
torch_empty_cache_steps = 250,
|
| 108 |
+
learning_rate = 5e-05,
|
| 109 |
+
weight_decay = 0.01,
|
| 110 |
+
adam_beta1 = 0.9,
|
| 111 |
+
adam_beta2 = 0.999,
|
| 112 |
+
adam_epsilon = 1e-08,
|
| 113 |
+
max_grad_norm = 1.0,
|
| 114 |
+
num_train_epochs = 3.0,
|
| 115 |
+
max_steps = -1,
|
| 116 |
+
lr_scheduler_type = 'linear',
|
| 117 |
+
warmup_ratio = 0.1,
|
| 118 |
+
warmup_steps = 0,
|
| 119 |
+
log_level = 'passive',
|
| 120 |
+
log_level_replica = 'warning',
|
| 121 |
+
log_on_each_node = True,
|
| 122 |
+
logging_dir = None,
|
| 123 |
+
logging_strategy = 'steps',
|
| 124 |
+
logging_first_step = False,
|
| 125 |
+
logging_steps = 1,
|
| 126 |
+
logging_nan_inf_filter = False,
|
| 127 |
+
save_strategy = 'steps',
|
| 128 |
+
save_steps = 500,
|
| 129 |
+
save_total_limit = None,
|
| 130 |
+
save_safetensors = True,
|
| 131 |
+
save_on_each_node = False,
|
| 132 |
+
save_only_model = False,
|
| 133 |
+
restore_callback_states_from_checkpoint = False,
|
| 134 |
+
no_cuda = False,
|
| 135 |
+
use_cpu = False,
|
| 136 |
+
use_mps_device = False,
|
| 137 |
+
seed = 3407,
|
| 138 |
+
data_seed = 3407,
|
| 139 |
+
jit_mode_eval = False,
|
| 140 |
+
use_ipex = False,
|
| 141 |
+
bf16 = False,
|
| 142 |
+
fp16 = False,
|
| 143 |
+
fp16_opt_level = 'O1',
|
| 144 |
+
half_precision_backend = 'auto',
|
| 145 |
+
bf16_full_eval = False,
|
| 146 |
+
fp16_full_eval = False,
|
| 147 |
+
tf32 = None,
|
| 148 |
+
local_rank = -1,
|
| 149 |
+
ddp_backend = None,
|
| 150 |
+
tpu_num_cores = None,
|
| 151 |
+
tpu_metrics_debug = False,
|
| 152 |
+
debug = '',
|
| 153 |
+
dataloader_drop_last = False,
|
| 154 |
+
eval_steps = None,
|
| 155 |
+
dataloader_num_workers = 0,
|
| 156 |
+
dataloader_prefetch_factor = None,
|
| 157 |
+
past_index = -1,
|
| 158 |
+
run_name = None,
|
| 159 |
+
disable_tqdm = None,
|
| 160 |
+
remove_unused_columns = True,
|
| 161 |
+
label_names = None,
|
| 162 |
+
load_best_model_at_end = False,
|
| 163 |
+
metric_for_best_model = None,
|
| 164 |
+
greater_is_better = None,
|
| 165 |
+
ignore_data_skip = False,
|
| 166 |
+
fsdp = '',
|
| 167 |
+
fsdp_min_num_params = 0,
|
| 168 |
+
fsdp_config = None,
|
| 169 |
+
tp_size = 0,
|
| 170 |
+
fsdp_transformer_layer_cls_to_wrap = None,
|
| 171 |
+
accelerator_config = None,
|
| 172 |
+
deepspeed = None,
|
| 173 |
+
label_smoothing_factor = 0.0,
|
| 174 |
+
optim = 'adamw_8bit',
|
| 175 |
+
optim_args = None,
|
| 176 |
+
adafactor = False,
|
| 177 |
+
group_by_length = False,
|
| 178 |
+
length_column_name = 'length',
|
| 179 |
+
report_to = None,
|
| 180 |
+
ddp_find_unused_parameters = None,
|
| 181 |
+
ddp_bucket_cap_mb = None,
|
| 182 |
+
ddp_broadcast_buffers = None,
|
| 183 |
+
dataloader_pin_memory = True,
|
| 184 |
+
dataloader_persistent_workers = False,
|
| 185 |
+
skip_memory_metrics = True,
|
| 186 |
+
use_legacy_prediction_loop = False,
|
| 187 |
+
push_to_hub = False,
|
| 188 |
+
resume_from_checkpoint = None,
|
| 189 |
+
hub_model_id = None,
|
| 190 |
+
hub_strategy = 'every_save',
|
| 191 |
+
hub_token = None,
|
| 192 |
+
hub_private_repo = None,
|
| 193 |
+
hub_always_push = False,
|
| 194 |
+
gradient_checkpointing = False,
|
| 195 |
+
gradient_checkpointing_kwargs = None,
|
| 196 |
+
include_inputs_for_metrics = False,
|
| 197 |
+
eval_do_concat_batches = True,
|
| 198 |
+
fp16_backend = 'auto',
|
| 199 |
+
push_to_hub_model_id = None,
|
| 200 |
+
push_to_hub_organization = None,
|
| 201 |
+
push_to_hub_token = None,
|
| 202 |
+
mp_parameters = '',
|
| 203 |
+
auto_find_batch_size = False,
|
| 204 |
+
full_determinism = False,
|
| 205 |
+
torchdynamo = None,
|
| 206 |
+
ray_scope = 'last',
|
| 207 |
+
ddp_timeout = 1800,
|
| 208 |
+
torch_compile = False,
|
| 209 |
+
torch_compile_backend = None,
|
| 210 |
+
torch_compile_mode = None,
|
| 211 |
+
include_tokens_per_second = False,
|
| 212 |
+
include_num_input_tokens_seen = False,
|
| 213 |
+
neftune_noise_alpha = None,
|
| 214 |
+
optim_target_modules = None,
|
| 215 |
+
batch_eval_metrics = False,
|
| 216 |
+
eval_on_start = False,
|
| 217 |
+
use_liger_kernel = False,
|
| 218 |
+
eval_use_gather_object = False,
|
| 219 |
+
average_tokens_across_devices = False,
|
| 220 |
+
dataset_num_proc = None,
|
| 221 |
+
num_mini_batches = 1,
|
| 222 |
+
total_episodes = None,
|
| 223 |
+
local_rollout_forward_batch_size = 64,
|
| 224 |
+
num_sample_generations = 10,
|
| 225 |
+
response_length = 53,
|
| 226 |
+
stop_token = None,
|
| 227 |
+
stop_token_id = None,
|
| 228 |
+
temperature = 0.7,
|
| 229 |
+
missing_eos_penalty = None,
|
| 230 |
+
sft_model_path = 'EleutherAI/pythia-160m',
|
| 231 |
+
world_size = None,
|
| 232 |
+
num_total_batches = None,
|
| 233 |
+
micro_batch_size = None,
|
| 234 |
+
local_batch_size = None,
|
| 235 |
+
batch_size = None,
|
| 236 |
+
local_mini_batch_size = None,
|
| 237 |
+
mini_batch_size = None,
|
| 238 |
+
exp_name = 'ppo_config',
|
| 239 |
+
reward_model_path = 'EleutherAI/pythia-160m',
|
| 240 |
+
model_adapter_name = None,
|
| 241 |
+
ref_adapter_name = None,
|
| 242 |
+
num_ppo_epochs = 4,
|
| 243 |
+
whiten_rewards = False,
|
| 244 |
+
kl_coef = 0.05,
|
| 245 |
+
cliprange = 0.2,
|
| 246 |
+
vf_coef = 0.1,
|
| 247 |
+
cliprange_value = 0.2,
|
| 248 |
+
gamma = 1.0,
|
| 249 |
+
lam = 0.95,
|
| 250 |
+
ds3_gather_for_generation = True,
|
| 251 |
+
vllm_sampling_params = None,
|
| 252 |
+
unsloth_num_chunks = -1,
|
| 253 |
+
**kwargs,
|
| 254 |
+
):
|
| 255 |
+
if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
|
| 256 |
+
if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
|
| 257 |
+
if output_dir is None and save_strategy == 'steps' and save_steps == 500:
|
| 258 |
+
output_dir = 'unsloth_training_checkpoints'
|
| 259 |
+
save_strategy = 'no'
|
| 260 |
+
if dataset_num_proc is None:
|
| 261 |
+
from multiprocessing import cpu_count
|
| 262 |
+
dataset_num_proc = cpu_count()
|
| 263 |
+
|
| 264 |
+
super().__init__(
|
| 265 |
+
output_dir = output_dir,
|
| 266 |
+
overwrite_output_dir = overwrite_output_dir,
|
| 267 |
+
do_train = do_train,
|
| 268 |
+
do_eval = do_eval,
|
| 269 |
+
do_predict = do_predict,
|
| 270 |
+
eval_strategy = eval_strategy,
|
| 271 |
+
prediction_loss_only = prediction_loss_only,
|
| 272 |
+
per_device_train_batch_size = per_device_train_batch_size,
|
| 273 |
+
per_device_eval_batch_size = per_device_eval_batch_size,
|
| 274 |
+
per_gpu_train_batch_size = per_gpu_train_batch_size,
|
| 275 |
+
per_gpu_eval_batch_size = per_gpu_eval_batch_size,
|
| 276 |
+
gradient_accumulation_steps = gradient_accumulation_steps,
|
| 277 |
+
eval_accumulation_steps = eval_accumulation_steps,
|
| 278 |
+
eval_delay = eval_delay,
|
| 279 |
+
torch_empty_cache_steps = torch_empty_cache_steps,
|
| 280 |
+
learning_rate = learning_rate,
|
| 281 |
+
weight_decay = weight_decay,
|
| 282 |
+
adam_beta1 = adam_beta1,
|
| 283 |
+
adam_beta2 = adam_beta2,
|
| 284 |
+
adam_epsilon = adam_epsilon,
|
| 285 |
+
max_grad_norm = max_grad_norm,
|
| 286 |
+
num_train_epochs = num_train_epochs,
|
| 287 |
+
max_steps = max_steps,
|
| 288 |
+
lr_scheduler_type = lr_scheduler_type,
|
| 289 |
+
warmup_ratio = warmup_ratio,
|
| 290 |
+
warmup_steps = warmup_steps,
|
| 291 |
+
log_level = log_level,
|
| 292 |
+
log_level_replica = log_level_replica,
|
| 293 |
+
log_on_each_node = log_on_each_node,
|
| 294 |
+
logging_dir = logging_dir,
|
| 295 |
+
logging_strategy = logging_strategy,
|
| 296 |
+
logging_first_step = logging_first_step,
|
| 297 |
+
logging_steps = logging_steps,
|
| 298 |
+
logging_nan_inf_filter = logging_nan_inf_filter,
|
| 299 |
+
save_strategy = save_strategy,
|
| 300 |
+
save_steps = save_steps,
|
| 301 |
+
save_total_limit = save_total_limit,
|
| 302 |
+
save_safetensors = save_safetensors,
|
| 303 |
+
save_on_each_node = save_on_each_node,
|
| 304 |
+
save_only_model = save_only_model,
|
| 305 |
+
restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
|
| 306 |
+
no_cuda = no_cuda,
|
| 307 |
+
use_cpu = use_cpu,
|
| 308 |
+
use_mps_device = use_mps_device,
|
| 309 |
+
seed = seed,
|
| 310 |
+
data_seed = data_seed,
|
| 311 |
+
jit_mode_eval = jit_mode_eval,
|
| 312 |
+
use_ipex = use_ipex,
|
| 313 |
+
bf16 = bf16,
|
| 314 |
+
fp16 = fp16,
|
| 315 |
+
fp16_opt_level = fp16_opt_level,
|
| 316 |
+
half_precision_backend = half_precision_backend,
|
| 317 |
+
bf16_full_eval = bf16_full_eval,
|
| 318 |
+
fp16_full_eval = fp16_full_eval,
|
| 319 |
+
tf32 = tf32,
|
| 320 |
+
local_rank = local_rank,
|
| 321 |
+
ddp_backend = ddp_backend,
|
| 322 |
+
tpu_num_cores = tpu_num_cores,
|
| 323 |
+
tpu_metrics_debug = tpu_metrics_debug,
|
| 324 |
+
debug = debug,
|
| 325 |
+
dataloader_drop_last = dataloader_drop_last,
|
| 326 |
+
eval_steps = eval_steps,
|
| 327 |
+
dataloader_num_workers = dataloader_num_workers,
|
| 328 |
+
dataloader_prefetch_factor = dataloader_prefetch_factor,
|
| 329 |
+
past_index = past_index,
|
| 330 |
+
run_name = run_name,
|
| 331 |
+
disable_tqdm = disable_tqdm,
|
| 332 |
+
remove_unused_columns = remove_unused_columns,
|
| 333 |
+
label_names = label_names,
|
| 334 |
+
load_best_model_at_end = load_best_model_at_end,
|
| 335 |
+
metric_for_best_model = metric_for_best_model,
|
| 336 |
+
greater_is_better = greater_is_better,
|
| 337 |
+
ignore_data_skip = ignore_data_skip,
|
| 338 |
+
fsdp = fsdp,
|
| 339 |
+
fsdp_min_num_params = fsdp_min_num_params,
|
| 340 |
+
fsdp_config = fsdp_config,
|
| 341 |
+
tp_size = tp_size,
|
| 342 |
+
fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
|
| 343 |
+
accelerator_config = accelerator_config,
|
| 344 |
+
deepspeed = deepspeed,
|
| 345 |
+
label_smoothing_factor = label_smoothing_factor,
|
| 346 |
+
optim = optim,
|
| 347 |
+
optim_args = optim_args,
|
| 348 |
+
adafactor = adafactor,
|
| 349 |
+
group_by_length = group_by_length,
|
| 350 |
+
length_column_name = length_column_name,
|
| 351 |
+
report_to = report_to,
|
| 352 |
+
ddp_find_unused_parameters = ddp_find_unused_parameters,
|
| 353 |
+
ddp_bucket_cap_mb = ddp_bucket_cap_mb,
|
| 354 |
+
ddp_broadcast_buffers = ddp_broadcast_buffers,
|
| 355 |
+
dataloader_pin_memory = dataloader_pin_memory,
|
| 356 |
+
dataloader_persistent_workers = dataloader_persistent_workers,
|
| 357 |
+
skip_memory_metrics = skip_memory_metrics,
|
| 358 |
+
use_legacy_prediction_loop = use_legacy_prediction_loop,
|
| 359 |
+
push_to_hub = push_to_hub,
|
| 360 |
+
resume_from_checkpoint = resume_from_checkpoint,
|
| 361 |
+
hub_model_id = hub_model_id,
|
| 362 |
+
hub_strategy = hub_strategy,
|
| 363 |
+
hub_token = hub_token,
|
| 364 |
+
hub_private_repo = hub_private_repo,
|
| 365 |
+
hub_always_push = hub_always_push,
|
| 366 |
+
gradient_checkpointing = gradient_checkpointing,
|
| 367 |
+
gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
|
| 368 |
+
include_inputs_for_metrics = include_inputs_for_metrics,
|
| 369 |
+
eval_do_concat_batches = eval_do_concat_batches,
|
| 370 |
+
fp16_backend = fp16_backend,
|
| 371 |
+
push_to_hub_model_id = push_to_hub_model_id,
|
| 372 |
+
push_to_hub_organization = push_to_hub_organization,
|
| 373 |
+
push_to_hub_token = push_to_hub_token,
|
| 374 |
+
mp_parameters = mp_parameters,
|
| 375 |
+
auto_find_batch_size = auto_find_batch_size,
|
| 376 |
+
full_determinism = full_determinism,
|
| 377 |
+
torchdynamo = torchdynamo,
|
| 378 |
+
ray_scope = ray_scope,
|
| 379 |
+
ddp_timeout = ddp_timeout,
|
| 380 |
+
torch_compile = torch_compile,
|
| 381 |
+
torch_compile_backend = torch_compile_backend,
|
| 382 |
+
torch_compile_mode = torch_compile_mode,
|
| 383 |
+
include_tokens_per_second = include_tokens_per_second,
|
| 384 |
+
include_num_input_tokens_seen = include_num_input_tokens_seen,
|
| 385 |
+
neftune_noise_alpha = neftune_noise_alpha,
|
| 386 |
+
optim_target_modules = optim_target_modules,
|
| 387 |
+
batch_eval_metrics = batch_eval_metrics,
|
| 388 |
+
eval_on_start = eval_on_start,
|
| 389 |
+
use_liger_kernel = use_liger_kernel,
|
| 390 |
+
eval_use_gather_object = eval_use_gather_object,
|
| 391 |
+
average_tokens_across_devices = average_tokens_across_devices,
|
| 392 |
+
dataset_num_proc = dataset_num_proc,
|
| 393 |
+
num_mini_batches = num_mini_batches,
|
| 394 |
+
total_episodes = total_episodes,
|
| 395 |
+
local_rollout_forward_batch_size = local_rollout_forward_batch_size,
|
| 396 |
+
num_sample_generations = num_sample_generations,
|
| 397 |
+
response_length = response_length,
|
| 398 |
+
stop_token = stop_token,
|
| 399 |
+
stop_token_id = stop_token_id,
|
| 400 |
+
temperature = temperature,
|
| 401 |
+
missing_eos_penalty = missing_eos_penalty,
|
| 402 |
+
sft_model_path = sft_model_path,
|
| 403 |
+
world_size = world_size,
|
| 404 |
+
num_total_batches = num_total_batches,
|
| 405 |
+
micro_batch_size = micro_batch_size,
|
| 406 |
+
local_batch_size = local_batch_size,
|
| 407 |
+
batch_size = batch_size,
|
| 408 |
+
local_mini_batch_size = local_mini_batch_size,
|
| 409 |
+
mini_batch_size = mini_batch_size,
|
| 410 |
+
exp_name = exp_name,
|
| 411 |
+
reward_model_path = reward_model_path,
|
| 412 |
+
model_adapter_name = model_adapter_name,
|
| 413 |
+
ref_adapter_name = ref_adapter_name,
|
| 414 |
+
num_ppo_epochs = num_ppo_epochs,
|
| 415 |
+
whiten_rewards = whiten_rewards,
|
| 416 |
+
kl_coef = kl_coef,
|
| 417 |
+
cliprange = cliprange,
|
| 418 |
+
vf_coef = vf_coef,
|
| 419 |
+
cliprange_value = cliprange_value,
|
| 420 |
+
gamma = gamma,
|
| 421 |
+
lam = lam,
|
| 422 |
+
ds3_gather_for_generation = ds3_gather_for_generation,**kwargs)
|
| 423 |
+
self.vllm_sampling_params = vllm_sampling_params
|
| 424 |
+
self.unsloth_num_chunks = unsloth_num_chunks
|
| 425 |
+
pass
|
| 426 |
+
|
| 427 |
+
class _UnslothPPOTrainer(Trainer):
|
| 428 |
+
_tag_names = ["trl", "ppo"]
|
| 429 |
+
|
| 430 |
+
def __init__(
    self,
    args: PPOConfig,
    processing_class: Optional[
        Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
    ],
    model: nn.Module,
    ref_model: Optional[nn.Module],
    reward_model: nn.Module,
    train_dataset: Dataset,
    value_model: Optional[nn.Module] = None,
    data_collator: Optional[DataCollatorWithPadding] = None,
    eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
    # less commonly used
    optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
    callbacks: Optional[list[TrainerCallback]] = None,
    peft_config: Optional["PeftConfig"] = None,
) -> None:
    """Set up the PPO trainer: models, batch-size bookkeeping, optimizer,
    callbacks, and (optionally DeepSpeed-prepared) dataloaders.

    The policy and value model are wrapped together in a single
    ``PolicyAndValueWrapper`` so one forward pass serves both heads.
    """
    # A shared object would make the KL term degenerate; callers must copy.
    if ref_model is model:
        raise ValueError(
            "`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
            "same as `model`, you must make a copy of it, or `None` if you use peft."
        )

    self.args = args
    self.processing_class = processing_class
    self.policy_model = model

    # Fall back to a padding collator when none was supplied.
    if data_collator is None:
        data_collator = DataCollatorWithPadding(self.processing_class)

    # Stop-token handling: propagate the chosen stop token into the policy's
    # generation config. `stop_token` and `stop_token_id` are mutually exclusive.
    if args.stop_token and args.stop_token_id:
        raise ValueError("You cannot set both `stop_token` and `stop_token_id`.")
    elif args.stop_token:
        if args.stop_token == "eos":
            self.policy_model.generation_config.eos_token_id = self.stop_token_id = processing_class.eos_token_id
        else:
            raise ValueError(
                f"Unknown `stop_token` {args.stop_token}. Allowed values are: `'eos'` and `None` (no stop token)."
            )
    else:
        self.policy_model.generation_config.eos_token_id = self.stop_token_id = args.stop_token_id  # None or int

    # PEFT support: wrap (or re-wrap) the policy in the requested adapter config.
    if not is_peft_available() and peft_config is not None:
        raise ImportError(
            "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
        )
    elif is_peft_available() and peft_config is not None:
        # If the model already carries adapters, fold them in before re-wrapping.
        if isinstance(self.policy_model, PeftModel):
            self.policy_model = self.policy_model.merge_and_unload()
        self.policy_model = get_peft_model(self.policy_model, peft_config)
        if args.bf16 and getattr(self.policy_model, "is_loaded_in_4bit", False):
            peft_module_casting_to_bf16(self.policy_model)

    self.is_peft_model = is_peft_available() and isinstance(self.policy_model, PeftModel)
    self.model_adapter_name = args.model_adapter_name
    self.ref_adapter_name = args.ref_adapter_name

    # Reference model: explicit > adapter-toggling (PEFT) > frozen copy.
    if ref_model:
        self.ref_model = ref_model
    elif self.is_peft_model:
        self.ref_model = None
    else:
        self.ref_model = create_reference_model(self.policy_model)

    self.reward_model = reward_model
    self.train_dataset = train_dataset
    self.train_dataset_len = len(train_dataset)
    self.value_model = value_model
    self.data_collator = data_collator
    self.eval_dataset = eval_dataset
    self.optimizer, self.lr_scheduler = optimizers
    self.optimizer_cls_and_kwargs = None  # needed for transformers >= 4.47

    #########
    # calculate various batch sizes
    #########
    if args.total_episodes is None:  # allow the users to define episodes in terms of epochs.
        args.total_episodes = int(args.num_train_epochs * self.train_dataset_len)
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps)
    self.accelerator = accelerator
    args.world_size = accelerator.num_processes
    args.local_batch_size = (
        args.per_device_train_batch_size * args.gradient_accumulation_steps * args.num_mini_batches
    )
    args.micro_batch_size = int(args.per_device_train_batch_size * args.world_size)
    args.batch_size = int(args.local_batch_size * args.world_size)
    args.mini_batch_size = exact_div(
        args.batch_size, args.num_mini_batches, "`batch_size` must be a multiple of `num_mini_batches`"
    )
    args.local_mini_batch_size = exact_div(
        args.local_batch_size, args.num_mini_batches, "`local_batch_size` must be a multiple of `num_mini_batches`"
    )
    if args.whiten_rewards:
        # NOTE: stripped under `python -O`; whitening needs a minimum sample size.
        assert (
            args.local_mini_batch_size >= 8
        ), f"Per-rank minibatch size {args.local_mini_batch_size} is insufficient for whitening"
    # `per_rank_rollout_batch_size` is our `args.local_batch_size`
    # `per_rank_minibatch_size` is our `args.local_mini_batch_size`
    args.num_total_batches = math.ceil(
        args.total_episodes / args.batch_size
    )  # we may train for more than `total_episodes`
    time_tensor = torch.tensor(int(time.time()), device=accelerator.device)
    time_int = broadcast(time_tensor, 0).item()  # avoid different timestamps across processes
    args.run_name = f"{args.exp_name}__{args.seed}__{time_int}"
    self.local_seed = args.seed + accelerator.process_index * 100003  # Prime
    if args.num_sample_generations > 0:
        self.sample_generations_freq = max(1, args.num_total_batches // args.num_sample_generations)
    self.local_dataloader_batch_size = args.local_batch_size

    #########
    # setup model, optimizer, and others
    #########
    # Dropout would make logprobs non-deterministic between rollout and update.
    for module in [self.policy_model, self.ref_model, self.value_model, self.reward_model]:
        if module is not None:
            disable_dropout_in_model(module)
    self.model = PolicyAndValueWrapper(self.policy_model, self.value_model)
    self.model.config = self.policy_model.config  # needed for pushing to hub
    self.create_optimizer_and_scheduler(
        num_training_steps=args.num_total_batches
    )  # note that we are calling `self.lr_scheduler.step()` manually only at the batch level

    #########
    ### trainer specifics
    #########
    default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
    self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
    self.callback_handler = CallbackHandler(
        self.callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler
    )
    self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
    self.control = TrainerControl()
    self.state = OnlineTrainerState(
        is_local_process_zero=self.is_local_process_zero(),
        is_world_process_zero=self.is_world_process_zero(),
        stateful_callbacks=[
            cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)
        ],
    )
    self.current_flos = 0
    self.hp_search_backend = None
    self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
    self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None
    # Create distant repo and output directory if needed
    self.hub_model_id = None
    if self.args.push_to_hub:
        self.init_hf_repo()
    if self.args.should_save:
        os.makedirs(self.args.output_dir, exist_ok=True)

    # Add tags for models that have been loaded with the correct transformers version
    if hasattr(self.model, "add_model_tags"):
        self.model.add_model_tags(self._tag_names)

    #########
    ### setup dataloader
    #########
    self.dataloader = DataLoader(
        self.train_dataset,
        batch_size=self.local_dataloader_batch_size,
        shuffle=True,
        collate_fn=self.data_collator,
        drop_last=True,  # needed; otherwise the last batch will be of ragged shape
    )
    # sync random states for DataLoader(shuffle=True) before `accelerator.prepare`
    # see https://gist.github.com/vwxyzjn/2581bff1e48e185e0b85b6dfe1def79c
    torch.manual_seed(args.seed)
    self.model, self.optimizer, self.dataloader = accelerator.prepare(self.model, self.optimizer, self.dataloader)
    torch.manual_seed(self.local_seed)  # reset the local seed again

    self.eval_dataloader = DataLoader(
        self.eval_dataset,
        batch_size=args.per_device_eval_batch_size,
        collate_fn=self.data_collator,
        drop_last=True,
    )  # no need to shuffle eval dataset
    self.eval_dataloader = accelerator.prepare(self.eval_dataloader)

    if self.is_deepspeed_enabled:
        self.reward_model = prepare_deepspeed(
            self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16
        )

        if self.ref_model is None:
            if not self.is_peft_model:
                raise ValueError("No reference model and model is not a Peft model.")
        else:
            self.ref_model = prepare_deepspeed(
                self.ref_model, args.per_device_train_batch_size, args.fp16, args.bf16
            )
    else:
        if self.ref_model is None:
            if not self.is_peft_model:
                raise ValueError("No reference model and model is not a Peft model.")
        else:
            self.ref_model = self.ref_model.to(self.accelerator.device)
        self.reward_model = self.reward_model.to(self.accelerator.device)
|
| 633 |
+
|
| 634 |
+
def get_train_dataloader(self) -> DataLoader:
    """Return the dataloader built and `accelerator.prepare`d in `__init__`."""
    return self.dataloader
|
| 636 |
+
|
| 637 |
+
def get_eval_dataloader(self) -> DataLoader:
    """Return the evaluation dataloader prepared in `__init__`."""
    return self.eval_dataloader
|
| 639 |
+
|
| 640 |
+
@contextmanager
def null_ref_context(self):
    """Context manager for handling null reference model (that is, peft adapter manipulation)."""
    # With PEFT and no dedicated ref adapter, temporarily disable the policy
    # adapter so the base weights act as the reference model; otherwise no-op.
    adapter_ctx = (
        self.accelerator.unwrap_model(self.model.policy).disable_adapter()
        if self.is_peft_model and not self.ref_adapter_name
        else nullcontext()
    )
    with adapter_ctx:
        if self.ref_adapter_name:
            self.model.policy.set_adapter(self.ref_adapter_name)
        yield
        # Restore the training adapter after the caller is done.
        if self.ref_adapter_name:
            self.model.policy.set_adapter(self.model_adapter_name or "default")
|
| 653 |
+
|
| 654 |
+
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
    """Save only the policy sub-model, temporarily swapping it in as `self.model`
    (and as `self.deepspeed` when DeepSpeed is active) so `Trainer.save_model`
    serializes the policy rather than the policy+value wrapper.
    """
    saved_wrapper = self.model
    self.model = self.model.policy  # save only the policy

    if self.is_deepspeed_enabled:
        saved_engine = self.deepspeed
        self.deepspeed = self.model

    super().save_model(output_dir, _internal_call)

    # Put the wrapper (and DeepSpeed engine) back for continued training.
    self.model = saved_wrapper
    if self.is_deepspeed_enabled:
        self.deepspeed = saved_engine
|
| 668 |
+
|
| 669 |
+
def train(self):
    """Run the PPO loop: generate rollouts, score them with the reward model,
    compute GAE advantages, then perform clipped policy/value updates.
    """
    args = self.args
    accelerator = self.accelerator
    optimizer = self.optimizer
    model = self.model
    ref_policy = self.ref_model
    reward_model = self.reward_model
    processing_class = self.processing_class
    dataloader = self.dataloader
    device = accelerator.device

    def repeat_generator():
        # Cycle the dataloader forever; the outer loop decides when to stop.
        while True:
            yield from dataloader

    iter_dataloader = iter(repeat_generator())
    generation_config = GenerationConfig(
        max_new_tokens=args.response_length,
        temperature=(args.temperature + 1e-7),
        top_k=0.0,
        top_p=1.0,
        do_sample=True,
    )

    accelerator.print("===training policy===")
    start_time = time.time()
    stats_shape = (args.num_ppo_epochs, args.num_mini_batches, args.gradient_accumulation_steps)
    approxkl_stats = torch.zeros(stats_shape, device=device)
    pg_clipfrac_stats = torch.zeros(stats_shape, device=device)
    pg_loss_stats = torch.zeros(stats_shape, device=device)
    vf_loss_stats = torch.zeros(stats_shape, device=device)
    vf_clipfrac_stats = torch.zeros(stats_shape, device=device)
    entropy_stats = torch.zeros(stats_shape, device=device)
    ratio_stats = torch.zeros(stats_shape, device=device)
    model.train()

    # trainer state initialization
    self.state.global_step = 0
    self.state.episode = 0
    self.state.max_steps = args.num_total_batches * args.num_mini_batches
    self.state.num_train_epochs = args.total_episodes / self.train_dataset_len
    # Compute absolute values for logging, eval, and save if given as ratio
    if args.logging_steps is not None:
        if args.logging_steps < 1:
            self.state.logging_steps = math.ceil(self.state.max_steps * args.logging_steps)
        else:
            self.state.logging_steps = args.logging_steps
    if args.eval_steps is not None:
        if args.eval_steps < 1:
            self.state.eval_steps = math.ceil(self.state.max_steps * args.eval_steps)
        else:
            self.state.eval_steps = args.eval_steps
    if args.save_steps is not None:
        if args.save_steps < 1:
            self.state.save_steps = math.ceil(self.state.max_steps * args.save_steps)
        else:
            self.state.save_steps = args.save_steps
    self.control = self.callback_handler.on_train_begin(args, self.state, self.control)

    # backward compatibility
    if self.is_deepspeed_enabled:
        self.deepspeed = self.model
        self.model_wrapped = self.model

    for update in range(1, args.num_total_batches + 1):
        self.state.episode += 1 * args.batch_size
        data = next(iter_dataloader)
        with torch.no_grad():
            queries = data["input_ids"].to(device)
            context_length = queries.shape[1]
            responses = []
            postprocessed_responses = []
            logprobs = []
            ref_logprobs = []
            scores = []
            sequence_lengths = []
            values = []
            with unwrap_model_for_generation(
                self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation
            ) as unwrapped_model:
                query_responses, logitss = batch_generation(
                    unwrapped_model.policy,
                    queries,
                    args.local_rollout_forward_batch_size,
                    processing_class.pad_token_id,
                    generation_config,
                )

            # Score rollouts in forward-batch-sized chunks to bound memory.
            for i in range(0, queries.shape[0], args.local_rollout_forward_batch_size):
                query = queries[i : i + args.local_rollout_forward_batch_size]
                query_response = query_responses[i : i + args.local_rollout_forward_batch_size]
                response = query_response[:, context_length:]
                logits = logitss[i : i + args.local_rollout_forward_batch_size]
                logprob = selective_log_softmax(logits, response)
                del logits
                torch.cuda.empty_cache()

                if ref_policy is None:
                    # PEFT case: disable the adapter to get reference logits.
                    with self.null_ref_context():
                        ref_output = forward(model.policy, query_response, processing_class.pad_token_id)
                else:
                    ref_output = forward(ref_policy, query_response, processing_class.pad_token_id)
                ref_logits = ref_output.logits[:, context_length - 1 : -1]
                ref_logits /= args.temperature + 1e-7
                ref_logprob = selective_log_softmax(ref_logits, response)
                del ref_output, ref_logits
                torch.cuda.empty_cache()

                # Response Processing 1. truncate response after the first occurrence of `stop_token_id`
                postprocessed_response = response
                if self.stop_token_id is not None:  # handle the edge case when stop_token_id exists but is 0
                    postprocessed_response = truncate_response(
                        self.stop_token_id, processing_class.pad_token_id, response
                    )

                # Response Processing 2. run reward model on the truncated responses
                postprocessed_query_response = torch.cat((query, postprocessed_response), 1)
                sequence_length = first_true_indices(postprocessed_response == processing_class.pad_token_id) - 1
                unwrapped_value_model = accelerator.unwrap_model(model).value_model
                full_value, _, _ = get_reward(
                    unwrapped_value_model, query_response, processing_class.pad_token_id, context_length
                )
                value = full_value[:, context_length - 1 : -1].squeeze(-1)
                _, score, _ = get_reward(
                    reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length
                )

                responses.append(response)
                postprocessed_responses.append(postprocessed_response)
                logprobs.append(logprob)
                ref_logprobs.append(ref_logprob)
                sequence_lengths.append(sequence_length)
                scores.append(score)
                values.append(value)
            responses = torch.cat(responses, 0)
            postprocessed_responses = torch.cat(postprocessed_responses, 0)
            logprobs = torch.cat(logprobs, 0)
            ref_logprobs = torch.cat(ref_logprobs, 0)
            sequence_lengths = torch.cat(sequence_lengths, 0)
            scores = torch.cat(scores, 0)
            values = torch.cat(values, 0)
            del (logprob, ref_logprob, full_value, value, score, unwrapped_model)
            torch.cuda.empty_cache()
            gc.collect()

            # Response Processing 3. Filter completion. Ensure that the sample contains stop_token_id
            # Completions not passing that filter will receive a lower score.
            contain_eos_token = torch.any(postprocessed_responses == self.processing_class.eos_token_id, dim=-1)
            if self.args.missing_eos_penalty is not None:
                scores[~contain_eos_token] -= self.args.missing_eos_penalty

            # be very careful with `padding_mask_p1`; see https://excalidraw.com/#json=LWnzG4w2k5DjF_EOL_xPt,e2w3a-hFJ_gX5vOfeyXGTw
            response_idxs = torch.arange(responses.shape[1], device=responses.device).repeat(responses.shape[0], 1)
            padding_mask = response_idxs > sequence_lengths.unsqueeze(1)
            logprobs = torch.masked_fill(logprobs, padding_mask, INVALID_LOGPROB)
            ref_logprobs = torch.masked_fill(ref_logprobs, padding_mask, INVALID_LOGPROB)
            sequence_lengths_p1 = sequence_lengths + 1
            padding_mask_p1 = response_idxs > (sequence_lengths_p1.unsqueeze(1))
            values = torch.masked_fill(values, padding_mask_p1, 0)

            # 4. compute rewards: per-token KL penalty plus the score at the
            # final (non-padded) position of each sequence.
            kl = logprobs - ref_logprobs
            non_score_reward = -args.kl_coef * kl
            rewards = non_score_reward.clone()
            actual_start = torch.arange(rewards.size(0), device=rewards.device)
            actual_end = torch.where(sequence_lengths_p1 < rewards.size(1), sequence_lengths_p1, sequence_lengths)
            rewards[[actual_start, actual_end]] += scores

            # 5. whiten rewards
            if args.whiten_rewards:
                rewards = masked_whiten(rewards, mask=~padding_mask_p1, shift_mean=False)
                rewards = torch.masked_fill(rewards, padding_mask_p1, 0)

            # 6. compute advantages and returns via GAE, iterating backwards.
            lastgaelam = 0
            advantages_reversed = []
            gen_length = responses.shape[1]
            for t in reversed(range(gen_length)):
                nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0
                delta = rewards[:, t] + args.gamma * nextvalues - values[:, t]
                lastgaelam = delta + args.gamma * args.lam * lastgaelam
                advantages_reversed.append(lastgaelam)
            advantages = torch.stack(advantages_reversed[::-1], axis=1)
            returns = advantages + values
            advantages = masked_whiten(advantages, ~padding_mask)
            advantages = torch.masked_fill(advantages, padding_mask, 0)
            torch.cuda.empty_cache()

        # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
        for ppo_epoch_idx in range(args.num_ppo_epochs):
            b_inds = np.random.permutation(args.local_batch_size)
            minibatch_idx = 0
            for mini_batch_start in range(0, args.local_batch_size, args.local_mini_batch_size):
                mini_batch_end = mini_batch_start + args.local_mini_batch_size
                mini_batch_inds = b_inds[mini_batch_start:mini_batch_end]
                gradient_accumulation_idx = 0
                for micro_batch_start in range(0, args.local_mini_batch_size, args.per_device_train_batch_size):
                    with accelerator.accumulate(model):
                        micro_batch_end = micro_batch_start + args.per_device_train_batch_size
                        micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end]
                        mb_advantage = advantages[micro_batch_inds]
                        mb_responses = responses[micro_batch_inds]
                        mb_query_responses = query_responses[micro_batch_inds]
                        mb_logprobs = logprobs[micro_batch_inds]
                        mb_return = returns[micro_batch_inds]
                        mb_values = values[micro_batch_inds]

                        output, vpred_temp = forward(model, mb_query_responses, processing_class.pad_token_id)
                        logits = output.logits[:, context_length - 1 : -1]
                        logits /= args.temperature + 1e-7
                        new_logprobs = selective_log_softmax(logits, mb_responses)
                        new_logprobs = torch.masked_fill(
                            new_logprobs, padding_mask[micro_batch_inds], INVALID_LOGPROB
                        )
                        # Clipped value loss.
                        vpred = vpred_temp[:, context_length - 1 : -1].squeeze(-1)
                        vpred = torch.masked_fill(vpred, padding_mask_p1[micro_batch_inds], 0)
                        vpredclipped = torch.clamp(
                            vpred,
                            mb_values - args.cliprange_value,
                            mb_values + args.cliprange_value,
                        )
                        vf_losses1 = torch.square(vpred - mb_return)
                        vf_losses2 = torch.square(vpredclipped - mb_return)
                        vf_loss_max = torch.max(vf_losses1, vf_losses2)
                        vf_loss = 0.5 * masked_mean(vf_loss_max, ~padding_mask_p1[micro_batch_inds])
                        vf_clipfrac = masked_mean(
                            (vf_losses2 > vf_losses1).float(), ~padding_mask_p1[micro_batch_inds]
                        )
                        # Clipped policy-gradient loss.
                        logprobs_diff = new_logprobs - mb_logprobs
                        ratio = torch.exp(logprobs_diff)
                        pg_losses = -mb_advantage * ratio
                        pg_losses2 = -mb_advantage * torch.clamp(ratio, 1.0 - args.cliprange, 1.0 + args.cliprange)
                        pg_loss_max = torch.max(pg_losses, pg_losses2)
                        pg_loss = masked_mean(pg_loss_max, ~padding_mask[micro_batch_inds])
                        loss = pg_loss + args.vf_coef * vf_loss
                        accelerator.backward(loss)
                        optimizer.step()
                        optimizer.zero_grad()
                        with torch.no_grad():
                            pg_clipfrac = masked_mean(
                                (pg_losses2 > pg_losses).float(), ~padding_mask[micro_batch_inds]
                            )
                            prob_dist = torch.nn.functional.softmax(logits, dim=-1)
                            entropy = torch.logsumexp(logits, dim=-1) - torch.sum(prob_dist * logits, dim=-1)
                            approxkl = 0.5 * (logprobs_diff**2).mean()
                            approxkl_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = approxkl
                            pg_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = (
                                pg_clipfrac
                            )
                            pg_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_loss
                            vf_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = vf_loss
                            vf_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = (
                                vf_clipfrac
                            )
                            entropy_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = entropy.mean()
                            ratio_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = ratio.mean()
                    gradient_accumulation_idx += 1
                minibatch_idx += 1
                # del everything and empty cache
                # fmt: off
                del (
                    output, vpred_temp, logits, new_logprobs, vpred, vpredclipped,
                    vf_losses1, vf_losses2, vf_loss, vf_clipfrac, logprobs_diff, ratio, pg_losses, pg_losses2, pg_loss_max,
                    pg_loss, loss, pg_clipfrac, prob_dist, entropy, approxkl, mb_return,
                    mb_advantage, mb_values, mb_responses, mb_query_responses, mb_logprobs,
                )
                # fmt: on
                torch.cuda.empty_cache()
        with torch.no_grad():
            mean_kl = kl.sum(1).mean()
            mean_entropy = (-logprobs).sum(1).mean()
            mean_non_score_reward = non_score_reward.sum(1).mean()
            rlhf_reward = mean_non_score_reward + scores.mean()
            eps = int(self.state.episode / (time.time() - start_time))
            metrics = {}
            metrics["eps"] = eps
            metrics["objective/kl"] = self.accelerator.gather_for_metrics(mean_kl).mean().item()
            metrics["objective/entropy"] = self.accelerator.gather_for_metrics(mean_entropy).mean().item()
            metrics["objective/non_score_reward"] = (
                self.accelerator.gather_for_metrics(mean_non_score_reward).mean().item()
            )
            metrics["objective/rlhf_reward"] = self.accelerator.gather_for_metrics(rlhf_reward).mean().item()
            metrics["objective/scores"] = self.accelerator.gather_for_metrics(scores.mean()).mean().item()
            metrics["policy/approxkl_avg"] = self.accelerator.gather_for_metrics(approxkl_stats).mean().item()
            metrics["policy/clipfrac_avg"] = self.accelerator.gather_for_metrics(pg_clipfrac_stats).mean().item()
            metrics["loss/policy_avg"] = self.accelerator.gather_for_metrics(pg_loss_stats).mean().item()
            metrics["loss/value_avg"] = self.accelerator.gather_for_metrics(vf_loss_stats).mean().item()
            metrics["val/clipfrac_avg"] = self.accelerator.gather_for_metrics(vf_clipfrac_stats).mean().item()
            metrics["policy/entropy_avg"] = self.accelerator.gather_for_metrics(entropy_stats).mean().item()
            metrics["val/ratio"] = self.accelerator.gather_for_metrics(ratio_stats).mean().item()
            metrics["val/ratio_var"] = self.accelerator.gather_for_metrics(ratio_stats).var().item()
            metrics["val/num_eos_tokens"] = (responses == processing_class.eos_token_id).sum().item()
            metrics["lr"] = self.lr_scheduler.get_last_lr()[0]
            metrics["episode"] = self.state.episode
            self.state.epoch = self.state.episode / self.train_dataset_len  # used by self.log
            self.state.global_step += 1
            self.log(metrics)

        self.lr_scheduler.step()
        self.control = self.callback_handler.on_step_end(args, self.state, self.control)
        if self.control.should_save:
            self._save_checkpoint(model, trial=None)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
        del kl, mean_kl, mean_entropy, mean_non_score_reward, scores, metrics, non_score_reward
        torch.cuda.empty_cache()
        gc.collect()

        if args.num_sample_generations > 0 and (update - 1) % self.sample_generations_freq == 0:
            self.generate_completions(sampling=True)
            torch.cuda.empty_cache()
        del (
            query_responses,
            responses,
            postprocessed_responses,
            logprobs,
            ref_logprobs,
            values,
            sequence_lengths,
            contain_eos_token,
            sequence_lengths_p1,
            response_idxs,
            padding_mask,
            padding_mask_p1,
            rewards,
            actual_start,
            actual_end,
            advantages,
            returns,
        )
        torch.cuda.empty_cache()

    # HF trainer specifics
    self.control = self.callback_handler.on_train_end(args, self.state, self.control)
    if self.control.should_save:
        self._save_checkpoint(model, trial=None, metrics=None)
        self.control = self.callback_handler.on_save(self.args, self.state, self.control)
|
| 1006 |
+
|
| 1007 |
+
def generate_completions(self, sampling: bool = False):
    """Generate near-greedy completions on the eval set, score them with the
    reward model, and log a query/response/score table (rich console, and
    W&B / Comet when configured).

    Args:
        sampling: when True, only the first eval batch is processed.
    """
    args = self.args
    processing_class = self.processing_class
    # Near-zero temperature makes the samples effectively deterministic.
    generation_config = GenerationConfig(
        max_new_tokens=self.args.response_length,
        temperature=(0.01 + 1e-7),
        top_k=0.0,
        top_p=1.0,
        do_sample=True,
    )

    table = defaultdict(list)
    with unwrap_model_for_generation(
        self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation
    ) as unwrapped_model:
        for batch in self.eval_dataloader:
            query = batch["input_ids"]
            with torch.no_grad():
                context_length = query.shape[1]
                query_response, _ = batch_generation(
                    unwrapped_model.policy,
                    query,
                    query.shape[0],
                    processing_class.pad_token_id,
                    generation_config,
                )
                response = query_response[:, context_length:]
                postprocessed_response = response
                if self.stop_token_id is not None:  # handle the edge case when stop_token_id exists but is 0
                    postprocessed_response = truncate_response(
                        self.stop_token_id, processing_class.pad_token_id, response
                    )
                table["query"].extend(
                    gather_object(processing_class.batch_decode(query, skip_special_tokens=True))
                )
                table["model response"].extend(
                    gather_object(processing_class.batch_decode(postprocessed_response))
                )

                postprocessed_query_response = torch.cat((query, postprocessed_response), 1)
                _, score, _ = get_reward(
                    self.reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length
                )
                table["score"].extend(self.accelerator.gather_for_metrics(score).float().cpu().numpy())

            if sampling:
                break
    df = pd.DataFrame(table)

    if self.accelerator.is_main_process:
        print_rich_table(df.iloc[0 : 0 + 5])
        if "wandb" in args.report_to:
            import wandb

            if wandb.run is not None:
                wandb.log({"completions": wandb.Table(dataframe=df)})

        if "comet_ml" in args.report_to:
            log_table_to_comet_experiment(
                name="completions.csv",
                table=df,
            )
|
| 1069 |
+
|
| 1070 |
+
    def create_model_card(
        self,
        model_name: Optional[str] = None,
        dataset_name: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            model_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the model.
            dataset_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the dataset used for training.
            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
                Tags to be associated with the model card.
        """
        # Only the main process writes the model card; all other ranks bail out.
        if not self.is_world_process_zero():
            return

        # A non-directory _name_or_path is treated as a Hub model id; a local
        # directory path gives no usable base-model reference.
        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
            base_model = self.model.config._name_or_path
        else:
            base_model = None

        # Normalize tags to a list (accepts None, a single string, or a list).
        tags = tags or []
        if isinstance(tags, str):
            tags = [tags]

        # Models patched by Unsloth carry `unsloth_version` on their config.
        if hasattr(self.model.config, "unsloth_version"):
            tags.append("unsloth")

        # BibTeX citation for the PPO-for-LM-finetuning paper embedded in the card.
        citation = textwrap.dedent("""\
        @article{mziegler2019fine-tuning,
            title = {{Fine-Tuning Language Models from Human Preferences}},
            author = {Daniel M. Ziegler and Nisan Stiennon and Jeffrey Wu and Tom B. Brown and Alec Radford and Dario Amodei and Paul F. Christiano and Geoffrey Irving},
            year = 2019,
            eprint = {arXiv:1909.08593}
        }""")

        model_card = generate_model_card(
            base_model=base_model,
            model_name=model_name,
            hub_model_id=self.hub_model_id,
            dataset_name=dataset_name,
            tags=tags,
            # Link the wandb run only when wandb is installed and a run is active.
            wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
            comet_url=get_comet_experiment_url(),
            trainer_name="PPO",
            trainer_citation=citation,
            paper_title="Fine-Tuning Language Models from Human Preferences",
            paper_id="1909.08593",
        )

        model_card.save(os.path.join(self.args.output_dir, "README.md"))
class UnslothPPOTrainer(_UnslothPPOTrainer):
    """
    Thin Unsloth wrapper around `_UnslothPPOTrainer` that reconciles mixed-precision
    settings, eval defaults, tokenizer padding sides, and data collators before
    delegating to the parent constructor.
    """
    def __init__(
        self,
        args,
        processing_class,
        model,
        ref_model,
        reward_model,
        train_dataset,
        value_model = None,
        data_collator = None,
        eval_dataset = None,
        callbacks = None,
        peft_config = None,
        **kwargs
    ):
        # NOTE(review): this body is machine-generated and order-dependent —
        # env vars and `args` fields are mutated in sequence; do not reorder.
        if args is None: args = UnslothPPOConfig()
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        # UNSLOTH_FORCE_FLOAT32=1 overrides all precision flags (models that
        # cannot train in float16).
        force_float32 = False
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        # Infer the model's weight dtype: prefer config.torch_dtype, fall back
        # to the embedding weights' dtype.
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        # Reject mismatched precision requests (fp16 weights + bf16 training
        # and vice versa) unless float32 was forced.
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            # Neither flag set: derive mixed precision from the weight dtype.
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        # If an eval dataset was supplied but evaluation is disabled, turn on
        # step-based evaluation every 10% of training.
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eps = args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            # transformers <= 4.45.2 has a known gradient-accumulation bug.
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            # Shrink the (default) eval batch size to the train batch size, and
            # mirror gradient accumulation for eval accumulation.
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        # Keep full-eval dtype consistent with the training dtype.
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        # If metric callbacks exist in scope, logits must be returned from the
        # forward pass (Unsloth normally skips materializing them).
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        # Propagate the model's max_seq_length onto args when args lacks one.
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        # Switch Unsloth-patched models into training mode (enables grads etc.).
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        # Force right padding on every tokenizer-like object in scope.
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
        from unsloth_zoo.vision_utils import UnslothVisionDataCollator
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Pick the collator that matches whether the dataset carries labels.
            if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
                data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)
            elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
                data_collator = DataCollatorForSeq2Seq(__tokenizer)
        else:
            # Vision collator prepares batches itself: disable dataset preparation.
            if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
            if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
            if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Processor wrappers expose the real tokenizer at `.tokenizer`; the
            # collators need an object with `.pad`.
            if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
                if isinstance(data_collator, DataCollatorForSeq2Seq):
                    data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
                else:
                    data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)
        other_metrics = []

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('ppo_trainer', other_metrics)

        super().__init__(
            args = args,
            processing_class = processing_class,
            model = model,
            ref_model = ref_model,
            reward_model = reward_model,
            train_dataset = train_dataset,
            value_model = value_model,
            data_collator = data_collator,
            eval_dataset = eval_dataset,
            callbacks = callbacks,
            peft_config = peft_config,**kwargs)
        # Re-attach NEFTune manually: remove the parent's hook handle and set
        # the noise alpha directly on the input embeddings.
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass

pass