Portuguese
tharindu committed · verified
Commit aedf11a · 1 Parent(s): 74b4389

Upload folder using huggingface_hub
Files changed (38)
  1. best_model/decoder/config.json +34 -0
  2. best_model/decoder/generation_config.json +5 -0
  3. best_model/decoder/pytorch_model.bin +3 -0
  4. best_model/decoder/special_tokens_map.json +7 -0
  5. best_model/decoder/tokenizer.json +0 -0
  6. best_model/decoder/tokenizer_config.json +15 -0
  7. best_model/decoder/vocab.txt +0 -0
  8. best_model/encoder/config.json +32 -0
  9. best_model/encoder/pytorch_model.bin +3 -0
  10. best_model/encoder/special_tokens_map.json +7 -0
  11. best_model/encoder/tokenizer.json +0 -0
  12. best_model/encoder/tokenizer_config.json +15 -0
  13. best_model/encoder/vocab.txt +0 -0
  14. best_model/eval_results.txt +1 -0
  15. best_model/model_args.json +1 -0
  16. best_model/optimizer.pt +3 -0
  17. best_model/scheduler.pt +3 -0
  18. best_model/training_args.bin +3 -0
  19. checkpoint-32000/decoder/config.json +34 -0
  20. checkpoint-32000/decoder/generation_config.json +5 -0
  21. checkpoint-32000/decoder/pytorch_model.bin +3 -0
  22. checkpoint-32000/decoder/special_tokens_map.json +7 -0
  23. checkpoint-32000/decoder/tokenizer.json +0 -0
  24. checkpoint-32000/decoder/tokenizer_config.json +15 -0
  25. checkpoint-32000/decoder/vocab.txt +0 -0
  26. checkpoint-32000/encoder/config.json +32 -0
  27. checkpoint-32000/encoder/pytorch_model.bin +3 -0
  28. checkpoint-32000/encoder/special_tokens_map.json +7 -0
  29. checkpoint-32000/encoder/tokenizer.json +0 -0
  30. checkpoint-32000/encoder/tokenizer_config.json +15 -0
  31. checkpoint-32000/encoder/vocab.txt +0 -0
  32. checkpoint-32000/eval_results.txt +1 -0
  33. checkpoint-32000/model_args.json +1 -0
  34. checkpoint-32000/optimizer.pt +3 -0
  35. checkpoint-32000/scheduler.pt +3 -0
  36. checkpoint-32000/training_args.bin +3 -0
  37. eval_results.txt +1 -0
  38. training_progress_scores.csv +14 -0
best_model/decoder/config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "add_cross_attention": true,
+  "architectures": [
+    "BertLMHeadModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "is_decoder": true,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
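
The two flags that distinguish this decoder config from the plain encoder config below are "is_decoder" (causal, left-to-right self-attention) and "add_cross_attention" (cross-attention layers over the encoder's hidden states). A minimal sketch, not part of this commit, of how such a config can be produced with transformers; the output path is illustrative:

```python
from transformers import BertConfig

# Override the base BERTimbau config with the two decoder flags seen above.
config = BertConfig.from_pretrained(
    "neuralmind/bert-large-portuguese-cased",
    is_decoder=True,            # causal attention instead of bidirectional
    add_cross_attention=True,   # attend over encoder hidden states
)
config.save_pretrained("best_model/decoder")  # writes a config.json like the one above
```
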
best_model/decoder/generation_config.json ADDED
@@ -0,0 +1,5 @@
+{
+  "_from_model_config": true,
+  "pad_token_id": 0,
+  "transformers_version": "4.29.2"
+}
best_model/decoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b96e504df876ce36f630a893a3c2404241e93073628c0ba1b8a9d1e514377d80
+size 1741179769
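
The three lines above are a Git LFS pointer, not the weights themselves: "oid" is the SHA-256 of the real binary and "size" its byte count (~1.7 GB here). A minimal sketch, assuming you know the repo id (it is not shown on this page), of resolving all the pointers in this commit with huggingface_hub:

```python
from huggingface_hub import snapshot_download

# Downloads the actual LFS binaries alongside the small JSON/text files.
# The repo id below is a placeholder, not taken from this diff.
local_dir = snapshot_download(repo_id="<user>/<repo>")
print(local_dir)  # local path containing best_model/ and checkpoint-32000/
```
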
best_model/decoder/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
best_model/decoder/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
best_model/decoder/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
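
"do_lower_case": false and "tokenizer_class": "BertTokenizer" match the cased BERTimbau tokenizer. A minimal sketch, not part of the commit, of loading it from the saved folder:

```python
from transformers import BertTokenizer

# Loads tokenizer.json / vocab.txt / tokenizer_config.json saved in this folder.
tokenizer = BertTokenizer.from_pretrained("best_model/decoder")
print(tokenizer.tokenize("Olá, mundo!"))  # WordPiece pieces; case is preserved
```
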
best_model/decoder/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
best_model/encoder/config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
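
Together with the decoder config above (BertLMHeadModel with cross-attention), this BertModel encoder follows the two-folder layout that transformers' EncoderDecoderModel can reassemble into a single BERT2BERT seq2seq model. A minimal sketch, with paths assumed relative to a local clone of the repo:

```python
from transformers import EncoderDecoderModel

# Stitch the two saved halves back into one encoder-decoder model.
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "best_model/encoder",  # BertModel (config above)
    "best_model/decoder",  # BertLMHeadModel with cross-attention
)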
best_model/encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ea7f79560886b789f62ea3b1ecf5a910a83495dfb4d4b9f4ab22c5ce1966775
+size 1337721965
best_model/encoder/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
best_model/encoder/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
best_model/encoder/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
best_model/encoder/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
best_model/eval_results.txt ADDED
@@ -0,0 +1 @@
+eval_loss = 0.0010984839246040532
best_model/model_args.json ADDED
@@ -0,0 +1 @@
+{"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": true, "adafactor_scale_parameter": true, "adafactor_warmup_init": true, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/bertimbau/best_model", "cache_dir": "cache_dir/bertimbau", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 25, "encoding": null, "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 3200, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.0001, "local_rank": -1, "logging_steps": 3200, "loss_type": null, "loss_args": {}, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "neuralmind/bert-large-portuguese-cased-neuralmind/bert-large-portuguese-cased", "model_type": "bert-bert", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 10, "optimizer": "AdamW", "output_dir": "outputs/bertimbau", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 78, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": 3200, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {"name": "neuralmind/bert-large-portuguese-cased"}, "wandb_project": "DORE", "warmup_ratio": 0.06, "warmup_steps": 4946, "weight_decay": 0.0, "model_class": "Seq2SeqModel", "base_marian_model_name": null, "dataset_class": null, "dataset_cache_dir": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": true, "faiss_d": 768, "faiss_m": 128, "include_title_in_knowledge_dataset": true, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "rag_embed_batch_size": 16, "repetition_penalty": 1.0, "save_knowledge_dataset": true, "save_knowledge_dataset_with_checkpoints": false, "save_recent_only": true, "split_text_character": " ", "split_text_n": 100, "src_lang": "en_XX", "tgt_lang": "ro_RO", "top_k": null, "top_p": null, "use_multiprocessed_decoding": false}
best_model/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:512d059ea10af4122611126111a8250cb78db6b2ce32dafcaffec6679d4d696d
+size 6149550514
best_model/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e256f319386c1ba3833ac29cb14870f2e80ce6f86a6405851d9a5a16fcb97e1
+size 627
best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32809fef074c37b064766f37c6bb8656107d2d7170e76df7dbef8e31b1b2e50e
+size 3643
checkpoint-32000/decoder/config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "add_cross_attention": true,
+  "architectures": [
+    "BertLMHeadModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "is_decoder": true,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
checkpoint-32000/decoder/generation_config.json ADDED
@@ -0,0 +1,5 @@
+{
+  "_from_model_config": true,
+  "pad_token_id": 0,
+  "transformers_version": "4.29.2"
+}
checkpoint-32000/decoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6bd5dd018d6e565d78f34474bdfc8b581413a4f2e90bda1fb9c1760295d7868
+size 1741179769
checkpoint-32000/decoder/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
checkpoint-32000/decoder/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-32000/decoder/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
checkpoint-32000/decoder/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-32000/encoder/config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
checkpoint-32000/encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47d9c02eb9006b5a252551834cff429550dbbfe63278c465731372b128b82057
+size 1337721965
checkpoint-32000/encoder/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
checkpoint-32000/encoder/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-32000/encoder/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
checkpoint-32000/encoder/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-32000/eval_results.txt ADDED
@@ -0,0 +1 @@
+eval_loss = 7.8670058326776715
checkpoint-32000/model_args.json ADDED
@@ -0,0 +1 @@
+{"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": true, "adafactor_scale_parameter": true, "adafactor_warmup_init": true, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/bertimbau/best_model", "cache_dir": "cache_dir/bertimbau", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 25, "encoding": null, "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 3200, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.0001, "local_rank": -1, "logging_steps": 3200, "loss_type": null, "loss_args": {}, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "neuralmind/bert-large-portuguese-cased-neuralmind/bert-large-portuguese-cased", "model_type": "bert-bert", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 10, "optimizer": "AdamW", "output_dir": "outputs/bertimbau", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 78, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": 3200, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {"name": "neuralmind/bert-large-portuguese-cased"}, "wandb_project": "DORE", "warmup_ratio": 0.06, "warmup_steps": 4946, "weight_decay": 0.0, "model_class": "Seq2SeqModel", "base_marian_model_name": null, "dataset_class": null, "dataset_cache_dir": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": true, "faiss_d": 768, "faiss_m": 128, "include_title_in_knowledge_dataset": true, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "rag_embed_batch_size": 16, "repetition_penalty": 1.0, "save_knowledge_dataset": true, "save_knowledge_dataset_with_checkpoints": false, "save_recent_only": true, "split_text_character": " ", "split_text_n": 100, "src_lang": "en_XX", "tgt_lang": "ro_RO", "top_k": null, "top_p": null, "use_multiprocessed_decoding": false}
checkpoint-32000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:030a3b5c63db8bfe0cda3b2662696d879cef1e1bfb369f3b61694bdc9f3f232a
+size 6149550514
checkpoint-32000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd42812b358937ac060b9515fccb170357c322b99bde9a7f12725feb1e6c61a9
+size 627
checkpoint-32000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32809fef074c37b064766f37c6bb8656107d2d7170e76df7dbef8e31b1b2e50e
+size 3643
eval_results.txt ADDED
@@ -0,0 +1 @@
+eval_loss = 7.8670058326776715
training_progress_scores.csv ADDED
@@ -0,0 +1,14 @@
+global_step,eval_loss,train_loss
+3200,0.0010984839246040532,0.0004402927588671446
+6400,11.42560897227458,6.0060133934021
+8242,9.020513476242666,6.7133049964904785
+9600,11.665218191318058,6.697996616363525
+12800,7.84143747096499,6.460560321807861
+16000,10.771937537343684,6.483465194702148
+16484,11.314325284055574,5.990870952606201
+19200,8.672700035635676,6.374773025512695
+22400,12.222518371644961,6.672352313995361
+24726,10.012016912272461,5.828139781951904
+25600,10.41204889418719,6.098570823669434
+28800,12.032289798371945,6.654789924621582
+32000,7.8670058326776715,6.075879096984863
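
The CSV logs one row per evaluation: the eval_loss of ~0.0011 at step 3200 is the value recorded in best_model/eval_results.txt, and the step-32000 value matches the root eval_results.txt. A minimal sketch, not part of the commit, for plotting the two curves (requires pandas and matplotlib):

```python
import pandas as pd
import matplotlib.pyplot as plt

# Plot eval and train loss against the logged global step.
df = pd.read_csv("training_progress_scores.csv")
df.plot(x="global_step", y=["eval_loss", "train_loss"], marker="o")
plt.xlabel("global step")
plt.ylabel("loss")
plt.show()
```
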