Upload folder using huggingface_hub
- .gitattributes +2 -0
- README.md +36 -0
- autotrain-data/train/.nfs7b5d316bebb3f8600000012f +3 -0
- autotrain-data/validation/.nfsf67d1f51a5c83f6f00000130 +3 -0
- checkpoint-66840/config.json +0 -0
- checkpoint-66840/model.safetensors +3 -0
- checkpoint-66840/optimizer.pt +3 -0
- checkpoint-66840/rng_state.pth +3 -0
- checkpoint-66840/scheduler.pt +3 -0
- checkpoint-66840/trainer_state.json +0 -0
- checkpoint-66840/training_args.bin +3 -0
- config.json +0 -0
- model.safetensors +3 -0
- runs/Jan21_00-34-29_jupyter-jimhahn/events.out.tfevents.1737419673.jupyter-jimhahn.762.0 +2 -2
- runs/Jan21_00-34-29_jupyter-jimhahn/events.out.tfevents.1737429774.jupyter-jimhahn.762.1 +3 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +56 -0
- training_args.bin +3 -0
- training_params.json +30 -0
- vocab.txt +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+autotrain-data/train/.nfs7b5d316bebb3f8600000012f filter=lfs diff=lfs merge=lfs -text
+autotrain-data/validation/.nfsf67d1f51a5c83f6f00000130 filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,36 @@
+
+---
+tags:
+- autotrain
+- text-classification
+base_model: google-bert/bert-base-multilingual-cased
+widget:
+- text: "I love AutoTrain"
+---
+
+# Model Trained Using AutoTrain
+
+- Problem type: Text Classification
+
+## Validation Metrics
+loss: 6.522752285003662
+
+f1_macro: 0.009065770998272544
+
+f1_micro: 0.07129627861684455
+
+f1_weighted: 0.043941274155093045
+
+precision_macro: 0.008029350516190904
+
+precision_micro: 0.07129627861684455
+
+precision_weighted: 0.03595543350538865
+
+recall_macro: 0.014735118583896051
+
+recall_micro: 0.07129627861684455
+
+recall_weighted: 0.07129627861684455
+
+accuracy: 0.07129627861684455
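A minimal inference sketch for the model described in the card above, assuming the Hub repo id jimfhahn/base-multilingual-gnd-bert (inferred from the username and project_name fields in training_params.json further down), not a definitive usage guide:

```python
# Run the text classifier from the Hub; the repo id is an assumption
# inferred from training_params.json, not stated in the card itself.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="jimfhahn/base-multilingual-gnd-bert",  # assumed repo id
)
print(classifier("I love AutoTrain"))
```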
autotrain-data/train/.nfs7b5d316bebb3f8600000012f
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb65d39f6a9b263f7083a7adee65ffb0361bdf0c559c2965d0aafdc63ca42a34
+size 218855368
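The binary files in this commit are stored as Git LFS pointers like the one above: three plain-text lines carrying the spec version, the sha256 oid, and the blob size in bytes. A small sketch of reading those fields back out of a checked-out pointer file (path taken from the entry above):

```python
# Parse a Git LFS pointer file into its three fields: version, oid, size.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer("autotrain-data/train/.nfs7b5d316bebb3f8600000012f")
print(pointer["oid"], int(pointer["size"]))  # sha256:cb65d3... 218855368
```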
autotrain-data/validation/.nfsf67d1f51a5c83f6f00000130
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22232b2712383a39e9a704a63369bf7cd56336a231a2931c09160a802f49389f
+size 62725976
checkpoint-66840/config.json
ADDED
The diff for this file is too large to render.
checkpoint-66840/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0952e756f190f4e3e4af1f72ada1cfeee8f7c66601bad807df908677c55b22f1
+size 778088080
checkpoint-66840/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd8a1bc5ce762f0cebc17efec22679b072869a2fe177f50279bc84979f249497
+size 1556297210
checkpoint-66840/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f8857a2e527a571e533ba5355b60fe2ae37ffa57ec0465e923b387260e1a781
+size 14244
checkpoint-66840/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60cf39aa994602030ef4c19ca0fbc58206ffeac77ef4409507c86a3ace9880b5
+size 1064
checkpoint-66840/trainer_state.json
ADDED
The diff for this file is too large to render.
checkpoint-66840/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f12be06c61a45e7b77eae634e64eccc06df1e0c45a77f5dc8ea812ac1ce59a5
+size 5368
config.json
ADDED
The diff for this file is too large to render.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0952e756f190f4e3e4af1f72ada1cfeee8f7c66601bad807df908677c55b22f1
+size 778088080
runs/Jan21_00-34-29_jupyter-jimhahn/events.out.tfevents.1737419673.jupyter-jimhahn.762.0
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b19933e7f74e47183881aed642e71debbe2e9f7fff17bf39675807c0cd61e476
+size 2018525
runs/Jan21_00-34-29_jupyter-jimhahn/events.out.tfevents.1737429774.jupyter-jimhahn.762.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dd92bb9bbaf801c0c583ee3c9ee94b2e0c7d8408557cda4ecf749eab5c7e822
+size 936
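The events.out.tfevents.* files under runs/ are TensorBoard logs (training_params.json below sets "log": "tensorboard"). A sketch of reading scalars back out of the run directory, assuming the tensorboard package is installed; the "train/loss" tag is an assumption, so list the available tags first:

```python
# Inspect the TensorBoard event files logged during training.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jan21_00-34-29_jupyter-jimhahn")
acc.Reload()

print(acc.Tags()["scalars"])              # tags actually recorded
for event in acc.Scalars("train/loss"):   # assumed tag name
    print(event.step, event.value)
```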
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
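tokenizer_config.json, together with special_tokens_map.json, vocab.txt, and tokenizer.json, defines a cased multilingual BertTokenizer with model_max_length 512. A minimal loading sketch, again assuming the repo id jimfhahn/base-multilingual-gnd-bert:

```python
# Load the tokenizer defined by the files above and check its special tokens.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("jimfhahn/base-multilingual-gnd-bert")  # assumed repo id
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token)  # [CLS] [SEP] [PAD]

# max_length=100 mirrors max_seq_length in training_params.json.
encoded = tokenizer("I love AutoTrain", truncation=True, max_length=100)
print(encoded["input_ids"])
```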
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f12be06c61a45e7b77eae634e64eccc06df1e0c45a77f5dc8ea812ac1ce59a5
+size 5368
training_params.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "data_path": "base-multilingual-gnd-bert/autotrain-data",
+  "model": "google-bert/bert-base-multilingual-cased",
+  "lr": 3e-05,
+  "epochs": 22,
+  "max_seq_length": 100,
+  "batch_size": 16,
+  "warmup_ratio": 0.1,
+  "gradient_accumulation": 2,
+  "optimizer": "adamw_torch",
+  "scheduler": "linear",
+  "weight_decay": 0.0,
+  "max_grad_norm": 1.0,
+  "seed": 42,
+  "train_split": "train",
+  "valid_split": "validation",
+  "text_column": "autotrain_text",
+  "target_column": "autotrain_label",
+  "logging_steps": -1,
+  "project_name": "base-multilingual-gnd-bert",
+  "auto_find_batch_size": false,
+  "mixed_precision": "fp16",
+  "save_total_limit": 1,
+  "push_to_hub": true,
+  "eval_strategy": "epoch",
+  "username": "jimfhahn",
+  "log": "tensorboard",
+  "early_stopping_patience": 5,
+  "early_stopping_threshold": 0.01
+}
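These AutoTrain parameters correspond roughly to Hugging Face TrainingArguments. The sketch below shows that mapping under the assumption of a recent transformers release (which uses the eval_strategy argument name); it is not AutoTrain's actual trainer wiring, and fields such as early stopping and the column names are handled elsewhere:

```python
# Rough, hand-written mapping from training_params.json to TrainingArguments.
import json
from transformers import TrainingArguments

with open("training_params.json") as f:
    params = json.load(f)

args = TrainingArguments(
    output_dir=params["project_name"],
    learning_rate=params["lr"],                                   # 3e-05
    num_train_epochs=params["epochs"],                            # 22
    per_device_train_batch_size=params["batch_size"],             # 16
    gradient_accumulation_steps=params["gradient_accumulation"],  # 2
    warmup_ratio=params["warmup_ratio"],
    weight_decay=params["weight_decay"],
    max_grad_norm=params["max_grad_norm"],
    seed=params["seed"],
    lr_scheduler_type=params["scheduler"],                        # "linear"
    optim=params["optimizer"],                                    # "adamw_torch"
    fp16=params["mixed_precision"] == "fp16",
    eval_strategy=params["eval_strategy"],                        # evaluate each epoch
    save_total_limit=params["save_total_limit"],
    report_to=params["log"],                                      # "tensorboard"
)
```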
vocab.txt
ADDED
The diff for this file is too large to render.