Training in progress, epoch 3
- model.safetensors +1 -1
- run-9/checkpoint-114/config.json +27 -0
- run-9/checkpoint-114/model.safetensors +3 -0
- run-9/checkpoint-114/optimizer.pt +3 -0
- run-9/checkpoint-114/rng_state.pth +3 -0
- run-9/checkpoint-114/scheduler.pt +3 -0
- run-9/checkpoint-114/special_tokens_map.json +7 -0
- run-9/checkpoint-114/tokenizer.json +0 -0
- run-9/checkpoint-114/tokenizer_config.json +55 -0
- run-9/checkpoint-114/trainer_state.json +56 -0
- run-9/checkpoint-114/training_args.bin +3 -0
- run-9/checkpoint-114/vocab.txt +0 -0
- run-9/checkpoint-171/config.json +27 -0
- run-9/checkpoint-171/model.safetensors +3 -0
- run-9/checkpoint-171/optimizer.pt +3 -0
- run-9/checkpoint-171/rng_state.pth +3 -0
- run-9/checkpoint-171/scheduler.pt +3 -0
- run-9/checkpoint-171/special_tokens_map.json +7 -0
- run-9/checkpoint-171/tokenizer.json +0 -0
- run-9/checkpoint-171/tokenizer_config.json +55 -0
- run-9/checkpoint-171/trainer_state.json +65 -0
- run-9/checkpoint-171/training_args.bin +3 -0
- run-9/checkpoint-171/vocab.txt +0 -0
- run-9/checkpoint-228/config.json +27 -0
- run-9/checkpoint-228/model.safetensors +3 -0
- run-9/checkpoint-228/optimizer.pt +3 -0
- run-9/checkpoint-228/rng_state.pth +3 -0
- run-9/checkpoint-228/scheduler.pt +3 -0
- run-9/checkpoint-228/special_tokens_map.json +7 -0
- run-9/checkpoint-228/tokenizer.json +0 -0
- run-9/checkpoint-228/tokenizer_config.json +55 -0
- run-9/checkpoint-228/trainer_state.json +74 -0
- run-9/checkpoint-228/training_args.bin +3 -0
- run-9/checkpoint-228/vocab.txt +0 -0
- run-9/checkpoint-57/config.json +27 -0
- run-9/checkpoint-57/model.safetensors +3 -0
- run-9/checkpoint-57/optimizer.pt +3 -0
- run-9/checkpoint-57/rng_state.pth +3 -0
- run-9/checkpoint-57/scheduler.pt +3 -0
- run-9/checkpoint-57/special_tokens_map.json +7 -0
- run-9/checkpoint-57/tokenizer.json +0 -0
- run-9/checkpoint-57/tokenizer_config.json +55 -0
- run-9/checkpoint-57/trainer_state.json +47 -0
- run-9/checkpoint-57/training_args.bin +3 -0
- run-9/checkpoint-57/vocab.txt +0 -0
- runs/Jul31_20-26-08_3d6201a3fb09/events.out.tfevents.1722458213.3d6201a3fb09.1394.9 +3 -0
- runs/Jul31_20-26-08_3d6201a3fb09/events.out.tfevents.1722458319.3d6201a3fb09.1394.10 +3 -0
- training_args.bin +1 -1
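
Each run-9/checkpoint-* directory listed above is a standard Transformers Trainer checkpoint (config, safetensors weights, optimizer/scheduler/RNG state, tokenizer files, trainer_state.json, training_args.bin). As a minimal sketch, one of them could be loaded locally as shown below, assuming a local clone of this repository with the Git LFS files pulled; the checkpoint path and example text are illustrative, not prescribed by this commit.

# Minimal sketch: load one of the saved checkpoints from a local clone of this repo.
# The checkpoint directory and example sentence are illustrative assumptions.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint_dir = "run-9/checkpoint-228"  # any of the run-9 checkpoints listed above

tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint_dir)
model.eval()

# Illustrative input; what each label id means depends on how the dataset was encoded.
inputs = tokenizer("example tweet text", return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())
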
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f5418a83fbe2cb26801b0fa0da68a2b582734a025be3ba1f7c0f615491f6a977
 size 437958648
run-9/checkpoint-114/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "google-bert/bert-base-uncased",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.42.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}

run-9/checkpoint-114/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed21b8c2ff8e072742bccb29c146959ec22cb72ef262c76da7b0d3eddf4be62e
+size 437958648

run-9/checkpoint-114/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce663db7063a5f21758703260705209b7898f12e729d1586127ab716bc7dbe9d
+size 876038394

run-9/checkpoint-114/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:722f6d2ec990fc243106011beeb98fd1fbbaca0f647fc068888cec549350cd91
+size 14244

run-9/checkpoint-114/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b371fcc5d5ac00e6eadaf7ecb572371179775c6ae53eb6b6208b19375aea808
+size 1064

run-9/checkpoint-114/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-9/checkpoint-114/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.

run-9/checkpoint-114/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}

run-9/checkpoint-114/trainer_state.json
ADDED
@@ -0,0 +1,56 @@
+{
+  "best_metric": 0.6015,
+  "best_model_checkpoint": "BERTForDetectingDepression-Twitter2020/run-9/checkpoint-114",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 114,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5925,
+      "eval_loss": 0.6679326295852661,
+      "eval_runtime": 2.8117,
+      "eval_samples_per_second": 711.306,
+      "eval_steps_per_second": 44.457,
+      "step": 57
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6015,
+      "eval_loss": 0.6574124693870544,
+      "eval_runtime": 2.8097,
+      "eval_samples_per_second": 711.819,
+      "eval_steps_per_second": 44.489,
+      "step": 114
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 228,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 1.782025059242242e-05,
+    "num_train_epochs": 4,
+    "per_device_train_batch_size": 32,
+    "seed": 15
+  }
+}

run-9/checkpoint-114/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41af2f26a14d5fc8872624c01f3b9e5e53a0d8cacea123564d9a339ce414456d
+size 5176

run-9/checkpoint-114/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
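
The trainer_state.json above records the per-epoch evaluation history, the best metric, and the best checkpoint for this trial. A minimal sketch for reading it programmatically, assuming a local clone of the repository; the keys used are exactly those shown in the diff above.

# Minimal sketch: inspect a checkpoint's trainer_state.json to find the best checkpoint so far.
import json

with open("run-9/checkpoint-114/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])
for record in state["log_history"]:
    print(record["epoch"], record.get("eval_accuracy"), record.get("eval_loss"))
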
run-9/checkpoint-171/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "google-bert/bert-base-uncased",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.42.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}

run-9/checkpoint-171/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2ddea832e7af836f6def1d56dd7ec4dbeda0cc3ee90dc936d20bfa0f8d7f74d
+size 437958648

run-9/checkpoint-171/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:139cef6f1d6d0706f4d4f9094420b603a9e0dc43b8f1624a0ed6843389a4a027
+size 876038394

run-9/checkpoint-171/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6c0a03290c85f6a93a99e04e99f771fb2715e443c6ea7ca724b20d8f2a4f0e6
+size 14244

run-9/checkpoint-171/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91d60ddedfef4cfaf4dcc62b5c8c50178330cb81ebea36a792bd94e482c26742
+size 1064

run-9/checkpoint-171/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-9/checkpoint-171/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.

run-9/checkpoint-171/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}

run-9/checkpoint-171/trainer_state.json
ADDED
@@ -0,0 +1,65 @@
+{
+  "best_metric": 0.6015,
+  "best_model_checkpoint": "BERTForDetectingDepression-Twitter2020/run-9/checkpoint-114",
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 171,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5925,
+      "eval_loss": 0.6679326295852661,
+      "eval_runtime": 2.8117,
+      "eval_samples_per_second": 711.306,
+      "eval_steps_per_second": 44.457,
+      "step": 57
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6015,
+      "eval_loss": 0.6574124693870544,
+      "eval_runtime": 2.8097,
+      "eval_samples_per_second": 711.819,
+      "eval_steps_per_second": 44.489,
+      "step": 114
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.6,
+      "eval_loss": 0.668912947177887,
+      "eval_runtime": 2.8048,
+      "eval_samples_per_second": 713.073,
+      "eval_steps_per_second": 44.567,
+      "step": 171
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 228,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 1.782025059242242e-05,
+    "num_train_epochs": 4,
+    "per_device_train_batch_size": 32,
+    "seed": 15
+  }
+}

run-9/checkpoint-171/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41af2f26a14d5fc8872624c01f3b9e5e53a0d8cacea123564d9a339ce414456d
+size 5176

run-9/checkpoint-171/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
run-9/checkpoint-228/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "google-bert/bert-base-uncased",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.42.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}

run-9/checkpoint-228/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd222525e4422050abe8a3f5bb6280be8f415daab96ca904ea573857d7202e7d
+size 437958648

run-9/checkpoint-228/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0842cfc7ab8e6ec78bd4752ad4f66b3048dd70ee45310361205312e9bc845a84
+size 876038394

run-9/checkpoint-228/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b22a397b737f79b554f60b7478b98c8d7a28c9548e945f6a07f9c4ff34eae03d
+size 14244

run-9/checkpoint-228/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc2882219f47b5513a88d70c1e356b057af5fd39fc12f5ab88a5185dd20e2669
+size 1064

run-9/checkpoint-228/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-9/checkpoint-228/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.

run-9/checkpoint-228/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}

run-9/checkpoint-228/trainer_state.json
ADDED
@@ -0,0 +1,74 @@
+{
+  "best_metric": 0.616,
+  "best_model_checkpoint": "BERTForDetectingDepression-Twitter2020/run-9/checkpoint-228",
+  "epoch": 4.0,
+  "eval_steps": 500,
+  "global_step": 228,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5925,
+      "eval_loss": 0.6679326295852661,
+      "eval_runtime": 2.8117,
+      "eval_samples_per_second": 711.306,
+      "eval_steps_per_second": 44.457,
+      "step": 57
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6015,
+      "eval_loss": 0.6574124693870544,
+      "eval_runtime": 2.8097,
+      "eval_samples_per_second": 711.819,
+      "eval_steps_per_second": 44.489,
+      "step": 114
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.6,
+      "eval_loss": 0.668912947177887,
+      "eval_runtime": 2.8048,
+      "eval_samples_per_second": 713.073,
+      "eval_steps_per_second": 44.567,
+      "step": 171
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.616,
+      "eval_loss": 0.679675281047821,
+      "eval_runtime": 2.8911,
+      "eval_samples_per_second": 691.785,
+      "eval_steps_per_second": 43.237,
+      "step": 228
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 228,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 1.782025059242242e-05,
+    "num_train_epochs": 4,
+    "per_device_train_batch_size": 32,
+    "seed": 15
+  }
+}

run-9/checkpoint-228/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41af2f26a14d5fc8872624c01f3b9e5e53a0d8cacea123564d9a339ce414456d
+size 5176

run-9/checkpoint-228/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
run-9/checkpoint-57/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "google-bert/bert-base-uncased",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.42.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}

run-9/checkpoint-57/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf93323c350b839100ff09b11510e885da3503a020244166ea79cca84854bb49
+size 437958648

run-9/checkpoint-57/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a44c320afd0118c50d9bf2b6fd0de091726f7f084e00abd6645818bd0873121d
+size 876038394

run-9/checkpoint-57/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea9d2c8aaadc11b9172006d06cd7e80ffee603969ec3e27d3c97c3b49bd8a474
+size 14244

run-9/checkpoint-57/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfb884cc948444c6d5172ec01743ed1653116160685dec64ae4ff8331af76d18
+size 1064

run-9/checkpoint-57/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-9/checkpoint-57/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.

run-9/checkpoint-57/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}

run-9/checkpoint-57/trainer_state.json
ADDED
@@ -0,0 +1,47 @@
+{
+  "best_metric": 0.5925,
+  "best_model_checkpoint": "BERTForDetectingDepression-Twitter2020/run-9/checkpoint-57",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 57,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5925,
+      "eval_loss": 0.6679326295852661,
+      "eval_runtime": 2.8117,
+      "eval_samples_per_second": 711.306,
+      "eval_steps_per_second": 44.457,
+      "step": 57
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 228,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 1.782025059242242e-05,
+    "num_train_epochs": 4,
+    "per_device_train_batch_size": 32,
+    "seed": 15
+  }
+}

run-9/checkpoint-57/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41af2f26a14d5fc8872624c01f3b9e5e53a0d8cacea123564d9a339ce414456d
+size 5176

run-9/checkpoint-57/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
runs/Jul31_20-26-08_3d6201a3fb09/events.out.tfevents.1722458213.3d6201a3fb09.1394.9
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8599b759acac78bb1167c09f84eae00ab66ec669f8ca79dfcb329d2ea086e2da
+size 6557

runs/Jul31_20-26-08_3d6201a3fb09/events.out.tfevents.1722458319.3d6201a3fb09.1394.10
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7aee9d3e65d6a32c3a5a69cb01bb997afce87a946bae3b2fe6d9089087ae50ca
+size 6736
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:41af2f26a14d5fc8872624c01f3b9e5e53a0d8cacea123564d9a339ce414456d
 size 5176
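
The run-9/checkpoint-* directories above, together with "is_hyper_param_search": true and the "trial_params" block in each trainer_state.json, are the per-trial artifacts written by Trainer.hyperparameter_search. The following is a minimal, self-contained sketch of that kind of search setup; the dummy dataset, metric handling, and search ranges below are illustrative assumptions and are not taken from this commit.

# Minimal sketch of a Trainer.hyperparameter_search run that writes run-<trial>/checkpoint-<step>
# directories like the ones in this commit. Requires transformers, datasets, torch, and optuna.
# The toy dataset, compute_metrics, and hp_space ranges are assumptions for illustration only.
import numpy as np
from datasets import Dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

model_name = "google-bert/bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Tiny in-memory two-class dataset standing in for the real train/eval splits.
texts = ["feeling great today", "everything is hopeless"] * 8
labels = [0, 1] * 8
ds = Dataset.from_dict({"text": texts, "label": labels})
ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding="max_length", max_length=32))

def model_init():
    return AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

def compute_metrics(eval_pred):
    logits, refs = eval_pred
    return {"accuracy": float((np.argmax(logits, axis=-1) == refs).mean())}

def hp_space(trial):
    # Mirrors the fields recorded under "trial_params" in trainer_state.json.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32]),
        "seed": trial.suggest_int("seed", 1, 40),
    }

args = TrainingArguments(
    output_dir="BERTForDetectingDepression-Twitter2020",
    eval_strategy="epoch",
    save_strategy="epoch",
    # Assumption, consistent with best_metric tracking eval_accuracy in trainer_state.json.
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",
    report_to="none",
)

trainer = Trainer(
    model_init=model_init,
    args=args,
    train_dataset=ds,
    eval_dataset=ds,
    compute_metrics=compute_metrics,
)

best_trial = trainer.hyperparameter_search(
    direction="maximize",
    backend="optuna",
    hp_space=hp_space,
    n_trials=10,
    compute_objective=lambda metrics: metrics["eval_accuracy"],
)
print(best_trial)

With a setup of this shape, each trial is saved under output_dir/run-<trial>/checkpoint-<step>, which matches the layout of the run-9 checkpoints committed here.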