ASCCCCCCCC committed
Commit de22de8 · 1 Parent(s): a867b6e

Training in progress, step 500

pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0c1766a456bcb1fef500c04ffa1c0825229c610db410a0edcd255704cf87fdc9
+ oid sha256:9802af58de7b15dad2bba82ef06800e163ef26009f87ea10b38c04a8f397ad34
  size 409160877
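
pytorch_model.bin is tracked with Git LFS, so the diff above only touches the three-line pointer file: the spec version, the sha256 of the actual object (the oid), and its byte size. As a minimal sketch (assuming the real weights have been downloaded to pytorch_model.bin in the current directory), the new pointer can be verified like this:

import hashlib
import os

# Values copied from the new LFS pointer in the diff above.
EXPECTED_OID = "9802af58de7b15dad2bba82ef06800e163ef26009f87ea10b38c04a8f397ad34"
EXPECTED_SIZE = 409160877  # bytes

def sha256_of(path, chunk_size=1 << 20):
    # Stream the ~400 MB file through the hash instead of reading it at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

path = "pytorch_model.bin"  # assumed local download location
assert os.path.getsize(path) == EXPECTED_SIZE, "size does not match the pointer"
assert sha256_of(path) == EXPECTED_OID, "oid does not match the pointer"
print("pytorch_model.bin matches its LFS pointer")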
runs/Apr14_07-03-44_78864dc90326/1649921869.503068/events.out.tfevents.1649921869.78864dc90326.33.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22d37ab459a4d2c2428a2918d5c0d3b9e39ceca08c5b7d9cb164b5db13bed712
+ size 4793
runs/Apr14_07-03-44_78864dc90326/events.out.tfevents.1649921869.78864dc90326.33.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:979fc88f22a15165d2f9b8481f5b98c2760890d88495c87dde9b4cacc64b296b
+ size 3642
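
The two ADDED files are TensorBoard event logs written during training (also stored as LFS pointers, hence only the oid and size appear in the diff). A rough sketch of reading the logged scalars back, assuming the runs/ directory has been downloaded and the tensorboard package is installed:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Run directory added in this commit (assumed present locally).
run_dir = "runs/Apr14_07-03-44_78864dc90326"

acc = EventAccumulator(run_dir)
acc.Reload()  # parse every events.out.tfevents.* file under run_dir

# Print each scalar series, e.g. the training loss logged up to step 500.
for tag in acc.Tags().get("scalars", []):
    for event in acc.Scalars(tag):
        print(f"{tag}: step={event.step} value={event.value}")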
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"do_lower_case": false, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": "/root/.cache/huggingface/transformers/7e23f4e1f58f867d672f84d9a459826e41cea3be6d0fe62502ddce9920f57e48.4495f7812b44ff0568ce7c4ff3fdbb2bac5eaf330440ffa30f46893bf749184d", "name_or_path": "bert-base-chinese", "tokenizer_class": "BertTokenizer"}
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-chinese", "tokenizer_class": "BertTokenizer"}