Datasets:

Modalities: Text
Formats: parquet
Languages: English
ArXiv:
Libraries: Datasets, pandas
License:
antypasd committed (verified)
Commit 17fa9ba · 1 Parent(s): a0d15c7

Delete loading script auxiliary file

Files changed (1)
  1. lm_finetuning.py +0 -250
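With the loading script gone, the dataset is served as Parquet and loads directly with the datasets library. A minimal sketch (split names taken from the deleted fine-tuning script below):

from datasets import load_dataset

# Load the Parquet-backed dataset straight from the Hub; no auxiliary script is required.
dataset = load_dataset("cardiffnlp/tweet_topic_multi")

# Splits referenced by the deleted lm_finetuning.py, e.g. "train_all", "validation_2021", "test_2021".
print(dataset["train_all"][0])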
lm_finetuning.py DELETED
@@ -1,250 +0,0 @@
- '''
- wandb offline
- export WANDB_DISABLED='true'
- export RAY_RESULTS='ray_results'
-
- python lm_finetuning.py -m "roberta-large" -o "ckpt/2021/roberta-large" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
- python lm_finetuning.py -m "roberta-large" -o "ckpt/2020/roberta-large" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
-
- python lm_finetuning.py -m "roberta-base" -o "ckpt/2021/roberta_base" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-base-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
- python lm_finetuning.py -m "roberta-base" -o "ckpt/2020/roberta_base" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-base-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
-
- python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-2019-90m" -o "ckpt/2021/twitter-roberta-base-2019-90m" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
- python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-2019-90m" -o "ckpt/2020/twitter-roberta-base-2019-90m" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
-
- python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2020" -o "ckpt/2021/twitter-roberta-base-dec2020" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-dec2020-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
- python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2020" -o "ckpt/2020/twitter-roberta-base-dec2020" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-dec2020-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
-
- python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2021" -o "ckpt/2021/twitter-roberta-base-dec2021" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-dec2021-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
- python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2021" -o "ckpt/2020/twitter-roberta-base-dec2021" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-dec2021-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
- '''
-
- import argparse
- import json
- import logging
- import os
- import math
- import shutil
- import urllib.request
- import multiprocessing
- from os.path import join as pj
-
- import torch
- import numpy as np
- from huggingface_hub import create_repo
- from datasets import load_dataset, load_metric
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
- from ray import tune
-
- from readme import get_readme
-
-
- logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
-
- PARALLEL = bool(int(os.getenv("PARALLEL", 1)))
- RAY_RESULTS = os.getenv("RAY_RESULTS", "ray_results")
- LABEL2ID = {
-     "arts_&_culture": 0,
-     "business_&_entrepreneurs": 1,
-     "celebrity_&_pop_culture": 2,
-     "diaries_&_daily_life": 3,
-     "family": 4,
-     "fashion_&_style": 5,
-     "film_tv_&_video": 6,
-     "fitness_&_health": 7,
-     "food_&_dining": 8,
-     "gaming": 9,
-     "learning_&_educational": 10,
-     "music": 11,
-     "news_&_social_concern": 12,
-     "other_hobbies": 13,
-     "relationships": 14,
-     "science_&_technology": 15,
-     "sports": 16,
-     "travel_&_adventure": 17,
-     "youth_&_student_life": 18
- }
- ID2LABEL = {v: k for k, v in LABEL2ID.items()}
-
-
- def internet_connection(host='http://google.com'):
-     try:
-         urllib.request.urlopen(host)
-         return True
-     except:
-         return False
-
-
- def sigmoid(x):
-     return 1 / (1 + math.exp(-x))
-
-
- def get_metrics():
-     metric_accuracy = load_metric("accuracy", "multilabel")
-     metric_f1 = load_metric("f1", "multilabel")
-
-     # metric_f1.compute(predictions=[[0, 1, 1], [1, 1, 0]], references=[[0, 1, 1], [0, 1, 0]], average='micro')
-     # metric_accuracy.compute(predictions=[[0, 1, 1], [1, 1, 0]], references=[[0, 1, 1], [0, 1, 0]])
-
-     def compute_metric_search(eval_pred):
-         logits, labels = eval_pred
-         predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
-         return metric_f1.compute(predictions=predictions, references=labels, average='micro')
-
-     def compute_metric_all(eval_pred):
-         logits, labels = eval_pred
-         predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
-         return {
-             'f1': metric_f1.compute(predictions=predictions, references=labels, average='micro')['f1'],
-             'f1_macro': metric_f1.compute(predictions=predictions, references=labels, average='macro')['f1'],
-             'accuracy': metric_accuracy.compute(predictions=predictions, references=labels)['accuracy']
-         }
-     return compute_metric_search, compute_metric_all
-
-
-
- def main():
-     parser = argparse.ArgumentParser(description='Fine-tuning language model.')
-     parser.add_argument('-m', '--model', help='transformer LM', default='roberta-base', type=str)
-     parser.add_argument('-d', '--dataset', help='', default='cardiffnlp/tweet_topic_multi', type=str)
-     parser.add_argument('--split-train', help='', required=True, type=str)
-     parser.add_argument('--split-validation', help='', required=True, type=str)
-     parser.add_argument('--split-test', help='', required=True, type=str)
-     parser.add_argument('-l', '--seq-length', help='', default=128, type=int)
-     parser.add_argument('--random-seed', help='', default=42, type=int)
-     parser.add_argument('--eval-step', help='', default=50, type=int)
-     parser.add_argument('-o', '--output-dir', help='Directory to output', default='ckpt_tmp', type=str)
-     parser.add_argument('-t', '--n-trials', default=10, type=int)
-     parser.add_argument('--push-to-hub', action='store_true')
-     parser.add_argument('--use-auth-token', action='store_true')
-     parser.add_argument('--hf-organization', default=None, type=str)
-     parser.add_argument('-a', '--model-alias', help='', default=None, type=str)
-     parser.add_argument('--summary-file', default='metric_summary.json', type=str)
-     parser.add_argument('--skip-train', action='store_true')
-     parser.add_argument('--skip-eval', action='store_true')
-     opt = parser.parse_args()
-     assert opt.summary_file.endswith('.json'), f'`--summary-file` should be a json file {opt.summary_file}'
-     # setup data
-     dataset = load_dataset(opt.dataset)
-     network = internet_connection()
-     # setup model
-     tokenizer = AutoTokenizer.from_pretrained(opt.model, local_files_only=not network)
-     model = AutoModelForSequenceClassification.from_pretrained(
-         opt.model,
-         id2label=ID2LABEL,
-         label2id=LABEL2ID,
-         num_labels=len(dataset[opt.split_train]['label'][0]),
-         local_files_only=not network,
-         problem_type="multi_label_classification"
-     )
-     tokenized_datasets = dataset.map(
-         lambda x: tokenizer(x["text"], padding="max_length", truncation=True, max_length=opt.seq_length),
-         batched=True)
-     # setup metrics
-     compute_metric_search, compute_metric_all = get_metrics()
-
-     if not opt.skip_train:
-         # setup trainer
-         trainer = Trainer(
-             model=model,
-             args=TrainingArguments(
-                 output_dir=opt.output_dir,
-                 evaluation_strategy="steps",
-                 eval_steps=opt.eval_step,
-                 seed=opt.random_seed
-             ),
-             train_dataset=tokenized_datasets[opt.split_train],
-             eval_dataset=tokenized_datasets[opt.split_validation],
-             compute_metrics=compute_metric_search,
-             model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-                 opt.model,
-                 return_dict=True,
-                 num_labels=len(dataset[opt.split_train]['label'][0]),
-                 id2label=ID2LABEL,
-                 label2id=LABEL2ID
-             )
-         )
-         # parameter search
-         if PARALLEL:
-             best_run = trainer.hyperparameter_search(
-                 hp_space=lambda x: {
-                     "learning_rate": tune.loguniform(1e-6, 1e-4),
-                     "num_train_epochs": tune.choice(list(range(1, 6))),
-                     "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
-                 },
-                 local_dir=RAY_RESULTS, direction="maximize", backend="ray", n_trials=opt.n_trials,
-                 resources_per_trial={'cpu': multiprocessing.cpu_count(), "gpu": torch.cuda.device_count()},
-
-             )
-         else:
-             best_run = trainer.hyperparameter_search(
-                 hp_space=lambda x: {
-                     "learning_rate": tune.loguniform(1e-6, 1e-4),
-                     "num_train_epochs": tune.choice(list(range(1, 6))),
-                     "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
-                 },
-                 local_dir=RAY_RESULTS, direction="maximize", backend="ray", n_trials=opt.n_trials
-             )
-         # finetuning
-         for n, v in best_run.hyperparameters.items():
-             setattr(trainer.args, n, v)
-         trainer.train()
-         trainer.save_model(pj(opt.output_dir, 'best_model'))
-         best_model_path = pj(opt.output_dir, 'best_model')
-     else:
-         best_model_path = pj(opt.output_dir, 'best_model')
-
-     # evaluation
-     model = AutoModelForSequenceClassification.from_pretrained(
-         best_model_path,
-         num_labels=len(dataset[opt.split_train]['label'][0]),
-         local_files_only=not network,
-         problem_type="multi_label_classification",
-         id2label=ID2LABEL,
-         label2id=LABEL2ID
-     )
-     trainer = Trainer(
-         model=model,
-         args=TrainingArguments(
-             output_dir=opt.output_dir,
-             evaluation_strategy="no",
-             seed=opt.random_seed
-         ),
-         train_dataset=tokenized_datasets[opt.split_train],
-         eval_dataset=tokenized_datasets[opt.split_test],
-         compute_metrics=compute_metric_all
-     )
-     summary_file = pj(opt.output_dir, opt.summary_file)
-     if not opt.skip_eval:
-         result = {f'test/{k}': v for k, v in trainer.evaluate().items()}
-         logging.info(json.dumps(result, indent=4))
-         with open(summary_file, 'w') as f:
-             json.dump(result, f)
-
-     if opt.push_to_hub:
-         assert opt.hf_organization is not None, 'specify hf organization `--hf-organization`'
-         assert opt.model_alias is not None, 'specify model alias `--model-alias`'
-         url = create_repo(opt.model_alias, organization=opt.hf_organization, exist_ok=True)
-         # if not opt.skip_train:
-         args = {"use_auth_token": opt.use_auth_token, "repo_url": url, "organization": opt.hf_organization}
-         trainer.model.push_to_hub(opt.model_alias, **args)
-         tokenizer.push_to_hub(opt.model_alias, **args)
-         if os.path.exists(summary_file):
-             shutil.copy2(summary_file, opt.model_alias)
-         extra_desc = f"This model is fine-tuned on `{opt.split_train}` split and validated on `{opt.split_test}` split of tweet_topic."
-         readme = get_readme(
-             model_name=f"{opt.hf_organization}/{opt.model_alias}",
-             metric=summary_file,
-             language_model=opt.model,
-             extra_desc=extra_desc
-         )
-         with open(f"{opt.model_alias}/README.md", "w") as f:
-             f.write(readme)
-         os.system(
-             f"cd {opt.model_alias} && git lfs install && git add . && git commit -m 'model update' && git push && cd ../")
-         shutil.rmtree(f"{opt.model_alias}")  # clean up the cloned repo
-
-
- if __name__ == '__main__':
-     main()
-
-
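For reference, the checkpoints this script fine-tunes and pushes use the same decision rule at inference time as compute_metric_search/compute_metric_all: sigmoid over the logits, thresholded at 0.5. A minimal sketch, assuming one of the checkpoints named in the deleted script's docstring is available on the Hub:

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Checkpoint name assembled from the --hf-organization and --model-alias values in the
# deleted script's docstring; assumed to be publicly available under that name.
model_name = "cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-multi-all"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

inputs = tokenizer("Never been so excited for a new album drop", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits[0]

# Same multi-label rule as the deleted script: sigmoid each logit, keep labels above 0.5.
probs = torch.sigmoid(logits)
predicted = [model.config.id2label[i] for i, p in enumerate(probs) if p > 0.5]
print(predicted)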