Add trained sentiment model and tokenizer

Files changed:
- .gitignore +7 -0
- config.json +25 -0
- model.safetensors +3 -0
- results/checkpoint-250/config.json +25 -0
- results/checkpoint-250/model.safetensors +3 -0
- results/checkpoint-250/optimizer.pt +3 -0
- results/checkpoint-250/rng_state.pth +3 -0
- results/checkpoint-250/scheduler.pt +3 -0
- results/checkpoint-250/special_tokens_map.json +7 -0
- results/checkpoint-250/tokenizer.json +0 -0
- results/checkpoint-250/tokenizer_config.json +55 -0
- results/checkpoint-250/trainer_state.json +64 -0
- results/checkpoint-250/training_args.bin +3 -0
- results/checkpoint-250/vocab.txt +0 -0
- results/checkpoint-500/config.json +25 -0
- results/checkpoint-500/model.safetensors +3 -0
- results/checkpoint-500/optimizer.pt +3 -0
- results/checkpoint-500/rng_state.pth +3 -0
- results/checkpoint-500/scheduler.pt +3 -0
- results/checkpoint-500/special_tokens_map.json +7 -0
- results/checkpoint-500/tokenizer.json +0 -0
- results/checkpoint-500/tokenizer_config.json +55 -0
- results/checkpoint-500/trainer_state.json +107 -0
- results/checkpoint-500/training_args.bin +3 -0
- results/checkpoint-500/vocab.txt +0 -0
- results/checkpoint-750/config.json +25 -0
- results/checkpoint-750/model.safetensors +3 -0
- results/checkpoint-750/optimizer.pt +3 -0
- results/checkpoint-750/rng_state.pth +3 -0
- results/checkpoint-750/scheduler.pt +3 -0
- results/checkpoint-750/special_tokens_map.json +7 -0
- results/checkpoint-750/tokenizer.json +0 -0
- results/checkpoint-750/tokenizer_config.json +55 -0
- results/checkpoint-750/trainer_state.json +150 -0
- results/checkpoint-750/training_args.bin +3 -0
- results/checkpoint-750/vocab.txt +0 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +55 -0
- train_sentiment_model.py +59 -0
- vocab.txt +0 -0
.gitignore
ADDED
@@ -0,0 +1,7 @@
+venv310/
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+*.log
+*.DS_Store
config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.1",
+  "vocab_size": 30522
+}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c229880bd9d0f94d9c0fdb08e9750f71cf475f96fba14155833c41994389bf2a
+size 267832560
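
Note: model.safetensors is committed as a Git LFS pointer, so the actual ~268 MB weights file is materialized with "git lfs pull" after cloning. A minimal loading sketch (an assumption, not part of the commit: it treats the repo root "." as the model directory and uses the IMDb label order, 0 = negative, 1 = positive):

# Minimal sketch: load the exported model and tokenizer from the repo root.
# Assumes the repo has been cloned and LFS objects pulled.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")
model = AutoModelForSequenceClassification.from_pretrained(".")
model.eval()

inputs = tokenizer("This movie was surprisingly good!",
                   return_tensors="pt", truncation=True, max_length=256)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # 0 = negative, 1 = positive (IMDb labels)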
results/checkpoint-250/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.1",
+  "vocab_size": 30522
+}
results/checkpoint-250/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22ae47ca212efc45aaa3075ba0ad22ad1f1eccaf4b7e65c8d8995aa6d49b1224
+size 267832560
results/checkpoint-250/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8661a7472efda2437d17a7971250b621ff19b9b75e0d19f610c1e8af6aafd1d0
+size 535724875
results/checkpoint-250/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a06415bb43c924d2a25f8b3c831e8f97886f4e932a4b3fcbbea178458ccd14d
+size 14455
results/checkpoint-250/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6fca1d0d5cc6c340c241d864da3a023dc41548ff1130eb59cb86627cad2626c
+size 1465
results/checkpoint-250/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
results/checkpoint-250/tokenizer.json
ADDED
(diff too large to render)
results/checkpoint-250/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
results/checkpoint-250/trainer_state.json
ADDED
@@ -0,0 +1,64 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 250,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "grad_norm": 2.609426498413086,
+      "learning_rate": 4.666666666666667e-05,
+      "loss": 0.6223,
+      "step": 50
+    },
+    {
+      "epoch": 0.4,
+      "grad_norm": 8.905411720275879,
+      "learning_rate": 4.3333333333333334e-05,
+      "loss": 0.4075,
+      "step": 100
+    },
+    {
+      "epoch": 0.6,
+      "grad_norm": 11.050198554992676,
+      "learning_rate": 4e-05,
+      "loss": 0.4125,
+      "step": 150
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.73728084564209,
+      "learning_rate": 3.6666666666666666e-05,
+      "loss": 0.4522,
+      "step": 200
+    },
+    {
+      "epoch": 1.0,
+      "grad_norm": 8.976237297058105,
+      "learning_rate": 3.3333333333333335e-05,
+      "loss": 0.3458,
+      "step": 250
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.3327711224555969,
+      "eval_runtime": 29.3806,
+      "eval_samples_per_second": 17.018,
+      "eval_steps_per_second": 2.144,
+      "step": 250
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 750,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 132467398656000.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
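
The trainer state above records the epoch-1 loss curve (train loss 0.62 → 0.35, eval loss 0.333). A quick way to inspect any checkpoint's log, sketched here as a hypothetical helper (not part of this commit):

# Hypothetical helper: print train/eval loss per logged step from a
# checkpoint's trainer_state.json.
import json

with open("results/checkpoint-250/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:4d}  train_loss={entry['loss']:.4f}")
    if "eval_loss" in entry:
        print(f"step {entry['step']:4d}  eval_loss={entry['eval_loss']:.4f}")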
results/checkpoint-250/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bbea30fa528a706a75824b0dc6a7c088554c90e711ee73c1b8e434dd2b4f6d4
+size 5329
results/checkpoint-250/vocab.txt
ADDED
(diff too large to render)
results/checkpoint-500/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.1",
+  "vocab_size": 30522
+}
results/checkpoint-500/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73e2aacd21969ddb68e2d652faa616d76b49e909f652d9d8d4426e39e6d719de
+size 267832560
results/checkpoint-500/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33b9d16d976ac212e14b2466e44873705f1807a2d6aeb9b7d9257a7b48025f60
+size 535724875
results/checkpoint-500/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e21af60e88f2ecaa42642ddcb3f6d94b5832e377b29cf61b9183dbde2e91fe26
+size 14455
results/checkpoint-500/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac0bf09babf9ab3b0323bd8c33087573f0c0c2f6fd6af50cb8d498cd6f1363fb
+size 1465
results/checkpoint-500/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
results/checkpoint-500/tokenizer.json
ADDED
(diff too large to render)
results/checkpoint-500/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
results/checkpoint-500/trainer_state.json
ADDED
@@ -0,0 +1,107 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "grad_norm": 2.609426498413086,
+      "learning_rate": 4.666666666666667e-05,
+      "loss": 0.6223,
+      "step": 50
+    },
+    {
+      "epoch": 0.4,
+      "grad_norm": 8.905411720275879,
+      "learning_rate": 4.3333333333333334e-05,
+      "loss": 0.4075,
+      "step": 100
+    },
+    {
+      "epoch": 0.6,
+      "grad_norm": 11.050198554992676,
+      "learning_rate": 4e-05,
+      "loss": 0.4125,
+      "step": 150
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.73728084564209,
+      "learning_rate": 3.6666666666666666e-05,
+      "loss": 0.4522,
+      "step": 200
+    },
+    {
+      "epoch": 1.0,
+      "grad_norm": 8.976237297058105,
+      "learning_rate": 3.3333333333333335e-05,
+      "loss": 0.3458,
+      "step": 250
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.3327711224555969,
+      "eval_runtime": 29.3806,
+      "eval_samples_per_second": 17.018,
+      "eval_steps_per_second": 2.144,
+      "step": 250
+    },
+    {
+      "epoch": 1.2,
+      "grad_norm": 11.21533489227295,
+      "learning_rate": 3e-05,
+      "loss": 0.2181,
+      "step": 300
+    },
+    {
+      "epoch": 1.4,
+      "grad_norm": 6.712032318115234,
+      "learning_rate": 2.6666666666666667e-05,
+      "loss": 0.3282,
+      "step": 350
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 0.10278991609811783,
+      "learning_rate": 2.3333333333333336e-05,
+      "loss": 0.1943,
+      "step": 400
+    },
+    {
+      "epoch": 1.8,
+      "grad_norm": 1.1258127689361572,
+      "learning_rate": 2e-05,
+      "loss": 0.1923,
+      "step": 450
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 17.39550018310547,
+      "learning_rate": 1.6666666666666667e-05,
+      "loss": 0.2048,
+      "step": 500
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.48086223006248474,
+      "eval_runtime": 31.6622,
+      "eval_samples_per_second": 15.792,
+      "eval_steps_per_second": 1.99,
+      "step": 500
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 750,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 264934797312000.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
results/checkpoint-500/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bbea30fa528a706a75824b0dc6a7c088554c90e711ee73c1b8e434dd2b4f6d4
+size 5329
results/checkpoint-500/vocab.txt
ADDED
(diff too large to render)
results/checkpoint-750/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.1",
+  "vocab_size": 30522
+}
results/checkpoint-750/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c229880bd9d0f94d9c0fdb08e9750f71cf475f96fba14155833c41994389bf2a
+size 267832560
results/checkpoint-750/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9249d4db419ad8ef15ab226780cbc1eb5de92b827d7142f2707b6eb732f5922
+size 535724875
results/checkpoint-750/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dce7e9600f4d681c780b791cebbe3fb30752d4e14911ec457d8543b3c4e151f
+size 14455
results/checkpoint-750/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a68ad7dcf06894d3c77081144d7e8163065b2ebe974de0b8563fb23664540f4f
+size 1465
results/checkpoint-750/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
results/checkpoint-750/tokenizer.json
ADDED
(diff too large to render)
results/checkpoint-750/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
results/checkpoint-750/trainer_state.json
ADDED
@@ -0,0 +1,150 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 750,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "grad_norm": 2.609426498413086,
+      "learning_rate": 4.666666666666667e-05,
+      "loss": 0.6223,
+      "step": 50
+    },
+    {
+      "epoch": 0.4,
+      "grad_norm": 8.905411720275879,
+      "learning_rate": 4.3333333333333334e-05,
+      "loss": 0.4075,
+      "step": 100
+    },
+    {
+      "epoch": 0.6,
+      "grad_norm": 11.050198554992676,
+      "learning_rate": 4e-05,
+      "loss": 0.4125,
+      "step": 150
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.73728084564209,
+      "learning_rate": 3.6666666666666666e-05,
+      "loss": 0.4522,
+      "step": 200
+    },
+    {
+      "epoch": 1.0,
+      "grad_norm": 8.976237297058105,
+      "learning_rate": 3.3333333333333335e-05,
+      "loss": 0.3458,
+      "step": 250
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.3327711224555969,
+      "eval_runtime": 29.3806,
+      "eval_samples_per_second": 17.018,
+      "eval_steps_per_second": 2.144,
+      "step": 250
+    },
+    {
+      "epoch": 1.2,
+      "grad_norm": 11.21533489227295,
+      "learning_rate": 3e-05,
+      "loss": 0.2181,
+      "step": 300
+    },
+    {
+      "epoch": 1.4,
+      "grad_norm": 6.712032318115234,
+      "learning_rate": 2.6666666666666667e-05,
+      "loss": 0.3282,
+      "step": 350
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 0.10278991609811783,
+      "learning_rate": 2.3333333333333336e-05,
+      "loss": 0.1943,
+      "step": 400
+    },
+    {
+      "epoch": 1.8,
+      "grad_norm": 1.1258127689361572,
+      "learning_rate": 2e-05,
+      "loss": 0.1923,
+      "step": 450
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 17.39550018310547,
+      "learning_rate": 1.6666666666666667e-05,
+      "loss": 0.2048,
+      "step": 500
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.48086223006248474,
+      "eval_runtime": 31.6622,
+      "eval_samples_per_second": 15.792,
+      "eval_steps_per_second": 1.99,
+      "step": 500
+    },
+    {
+      "epoch": 2.2,
+      "grad_norm": 69.4999008178711,
+      "learning_rate": 1.3333333333333333e-05,
+      "loss": 0.128,
+      "step": 550
+    },
+    {
+      "epoch": 2.4,
+      "grad_norm": 0.0629974976181984,
+      "learning_rate": 1e-05,
+      "loss": 0.1258,
+      "step": 600
+    },
+    {
+      "epoch": 2.6,
+      "grad_norm": 3.221457004547119,
+      "learning_rate": 6.666666666666667e-06,
+      "loss": 0.1061,
+      "step": 650
+    },
+    {
+      "epoch": 2.8,
+      "grad_norm": 0.05550043657422066,
+      "learning_rate": 3.3333333333333333e-06,
+      "loss": 0.1103,
+      "step": 700
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 0.24818731844425201,
+      "learning_rate": 0.0,
+      "loss": 0.0512,
+      "step": 750
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 0.5193189978599548,
+      "eval_runtime": 29.0108,
+      "eval_samples_per_second": 17.235,
+      "eval_steps_per_second": 2.172,
+      "step": 750
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 750,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 397402195968000.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
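
Across the three checkpoints the eval loss rises (0.333 → 0.481 → 0.519) while train loss falls to 0.05, a clear overfitting signal. One possible remedy, sketched below as an assumption rather than part of the committed script, is to have the Trainer track eval_loss, reload the best checkpoint, and stop early:

# Sketch (not in the committed script): keep the lowest-eval-loss checkpoint
# and stop once eval_loss stops improving, using transformers' built-ins.
from transformers import TrainingArguments, EarlyStoppingCallback

training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",
    save_strategy="epoch",
    num_train_epochs=3,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    load_best_model_at_end=True,        # restore the best checkpoint after training
    metric_for_best_model="eval_loss",
    greater_is_better=False,
    report_to="none",
)
# Then pass callbacks=[EarlyStoppingCallback(early_stopping_patience=1)]
# when constructing the Trainer.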
results/checkpoint-750/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bbea30fa528a706a75824b0dc6a7c088554c90e711ee73c1b8e434dd2b4f6d4
+size 5329
results/checkpoint-750/vocab.txt
ADDED
(diff too large to render)
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json
ADDED
(diff too large to render)
tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
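
A quick sanity check of the tokenizer config above (illustrative; assumes the repo root as the tokenizer path): encoded sequences should start with [CLS] (id 101) and end with [SEP] (id 102), matching added_tokens_decoder.

# Illustrative check of the special-token IDs declared above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
ids = tok("great film")["input_ids"]
print(ids[0], ids[-1])                 # 101 102
print(tok.convert_ids_to_tokens(ids))  # ['[CLS]', ..., '[SEP]']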
train_sentiment_model.py
ADDED
@@ -0,0 +1,59 @@
+from datasets import load_dataset
+from transformers import (
+    AutoTokenizer,
+    AutoModelForSequenceClassification,
+    Trainer,
+    TrainingArguments,
+)
+import torch
+
+# STEP 1: Load IMDb Dataset
+dataset = load_dataset("imdb")
+
+# STEP 2: Tokenize the Data
+checkpoint = "distilbert-base-uncased"
+tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+
+def preprocess(example):
+    return tokenizer(example["text"], truncation=True, padding="max_length", max_length=256)
+
+tokenized = dataset.map(preprocess, batched=True)
+tokenized = tokenized.remove_columns(["text"])
+tokenized = tokenized.rename_column("label", "labels")
+tokenized.set_format("torch")
+
+# Use a smaller subset for quick training
+train_dataset = tokenized["train"].shuffle(seed=42).select(range(2000))
+val_dataset = tokenized["test"].shuffle(seed=42).select(range(500))
+
+# STEP 3: Load Model
+model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
+
+# STEP 4: Define Training Arguments
+training_args = TrainingArguments(
+    output_dir="./results",
+    evaluation_strategy="epoch",
+    save_strategy="epoch",
+    num_train_epochs=3,
+    per_device_train_batch_size=8,
+    per_device_eval_batch_size=8,
+    logging_dir="./logs",
+    logging_steps=50,
+    report_to="none"
+)
+
+# STEP 5: Train
+trainer = Trainer(
+    model=model,
+    args=training_args,
+    train_dataset=train_dataset,
+    eval_dataset=val_dataset,
+    tokenizer=tokenizer,
+)
+
+trainer.train()
+
+# STEP 6: Save Locally to Repo Folder
+model.save_pretrained("./")
+tokenizer.save_pretrained("./")
+print("✅ Model and tokenizer saved locally!")
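
Once the script has run, the repo root doubles as a model directory. A usage sketch with the pipeline API (since config.json defines no id2label mapping, the pipeline reports the default names LABEL_0 = negative and LABEL_1 = positive, following the IMDb label order):

# Usage sketch: classify text with the committed model.
from transformers import pipeline

clf = pipeline("text-classification", model=".", tokenizer=".")
print(clf("An absolute masterpiece of a film."))
# e.g. [{'label': 'LABEL_1', 'score': 0.99}]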
vocab.txt
ADDED
(diff too large to render)