AhChat committed on
Commit 91ac687 · verified · 1 Parent(s): f10f786

End of training
README.md CHANGED
@@ -1,56 +1,62 @@
- ---
- license: apache-2.0
- base_model: distilroberta-base
- tags:
- - generated_from_trainer
- datasets:
- - eli5_category
- model-index:
- - name: my_awesome_eli5_mlm_model
-   results: []
- ---
-
- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- # my_awesome_eli5_mlm_model
-
- This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the eli5_category dataset.
- It achieves the following results on the evaluation set:
- - Loss: 2.0195
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 2e-05
- - train_batch_size: 8
- - eval_batch_size: 8
- - seed: 42
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: linear
- - num_epochs: 3
-
- ### Training results
-
-
-
- ### Framework versions
-
- - Transformers 4.36.1
- - Pytorch 2.7.1+cu128
- - Datasets 3.2.0
- - Tokenizers 0.15.2
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: distilroberta-base
+ tags:
+ - generated_from_trainer
+ datasets:
+ - eli5_category
+ model-index:
+ - name: my_awesome_eli5_mlm_model
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # my_awesome_eli5_mlm_model
+
+ This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the eli5_category dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.9992
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 3
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 2.2552        | 1.0   | 1299 | 2.0395          |
+ | 2.1823        | 2.0   | 2598 | 2.0380          |
+ | 2.1545        | 3.0   | 3897 | 2.0091          |
+
+
+ ### Framework versions
+
+ - Transformers 4.52.3
+ - Pytorch 2.6.0+cu124
+ - Datasets 3.6.0
+ - Tokenizers 0.21.1
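
For readers who want to reproduce the hyperparameters listed in the card, here is a minimal sketch of the corresponding `Trainer` setup. The dataset variables, output path, and eval strategy are illustrative assumptions, not taken from this commit, and the eli5_category tokenization step is elided:

```python
from transformers import (
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")
model = AutoModelForMaskedLM.from_pretrained("distilroberta-base")

# Masks a random subset of tokens in each batch for the MLM objective
# (0.15 is the library default masking probability).
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

args = TrainingArguments(
    output_dir="my_awesome_eli5_mlm_model",
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    optim="adamw_torch",        # OptimizerNames.ADAMW_TORCH from the card
    lr_scheduler_type="linear",
    num_train_epochs=3,
    eval_strategy="epoch",      # assumption: the card reports eval loss per epoch
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_train,  # placeholder: pre-tokenized eli5_category split
    eval_dataset=tokenized_eval,    # placeholder
    data_collator=collator,
)
trainer.train()
```

Once the checkpoint is on the Hub, it can be queried with the `fill-mask` pipeline. The repo id below is an assumption inferred from the committer and model name:

```python
from transformers import pipeline

# Repo id is an assumption (committer "AhChat" + model name above).
fill = pipeline("fill-mask", model="AhChat/my_awesome_eli5_mlm_model")

# RoBERTa-family checkpoints use "<mask>" as the mask token.
for pred in fill("The Milky Way is a <mask> galaxy."):
    print(f"{pred['token_str']!r}  score={pred['score']:.3f}")
```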
config.json CHANGED
@@ -1,27 +1,26 @@
- {
-   "_name_or_path": "distilroberta-base",
-   "architectures": [
-     "RobertaForMaskedLM"
-   ],
-   "attention_probs_dropout_prob": 0.1,
-   "bos_token_id": 0,
-   "classifier_dropout": null,
-   "eos_token_id": 2,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.1,
-   "hidden_size": 768,
-   "initializer_range": 0.02,
-   "intermediate_size": 3072,
-   "layer_norm_eps": 1e-05,
-   "max_position_embeddings": 514,
-   "model_type": "roberta",
-   "num_attention_heads": 12,
-   "num_hidden_layers": 6,
-   "pad_token_id": 1,
-   "position_embedding_type": "absolute",
-   "torch_dtype": "float32",
-   "transformers_version": "4.36.1",
-   "type_vocab_size": 1,
-   "use_cache": true,
-   "vocab_size": 50265
- }
+ {
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.3",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
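
The config above describes the standard distilroberta-base architecture: a 6-layer, 12-head RoBERTa encoder with hidden size 768 and a 50,265-token vocabulary. A minimal sketch of checking this programmatically, with the repo id again an assumption:

```python
from transformers import AutoConfig, AutoModelForMaskedLM

# Repo id is an assumption; substitute the actual Hub path or a local dir.
config = AutoConfig.from_pretrained("AhChat/my_awesome_eli5_mlm_model")
assert config.model_type == "roberta"
assert (config.num_hidden_layers, config.hidden_size) == (6, 768)

# from_config builds the architecture with randomly initialized weights;
# use from_pretrained(...) to load the fine-tuned weights as well.
model = AutoModelForMaskedLM.from_config(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```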
 
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1f5c880fe3cc8007307ec62e6b58152d74a9d543e30fcd76a8f3295a6c024b4b
+ oid sha256:841344aa838454b0616a21cb798300a460dcbc625429d6ce6736e64589466038
  size 328693404
runs/Jun06_12-43-08_dc85121ec65a/events.out.tfevents.1749213797.dc85121ec65a.1802.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:873fd4dbd57aa60f0a6c9ef97092aa10a35c78a2da483828d45a4cd70e8eb833
- size 6966
+ oid sha256:19e7bb55523b37dd3ff13f76c9bde9ff6430f95698720bd2a74c7927e89a3471
+ size 7591
runs/Jun06_12-43-08_dc85121ec65a/events.out.tfevents.1749215944.dc85121ec65a.1802.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1700b91561702a16138ada73d6cf1adf84f85bcce5dabe6804fee5aa342b7f81
+ size 359
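
The two tfevents files above carry the TensorBoard logs for this run. A minimal sketch of reading the scalars back after downloading them, assuming the `tensorboard` package is installed; the scalar tag name follows the Trainer's usual `eval/loss` convention but is an assumption:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point at the run directory that holds the tfevents files.
acc = EventAccumulator("runs/Jun06_12-43-08_dc85121ec65a")
acc.Reload()

print(acc.Tags()["scalars"])            # e.g. ['train/loss', 'eval/loss', ...]
for event in acc.Scalars("eval/loss"):  # tag name is an assumption
    print(event.step, event.value)
```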
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "</s>",
+   "sep_token": "</s>",
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
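
Two details worth noticing in the tokenizer files: `pad_token` is set to `</s>` rather than RoBERTa's usual `<pad>`, and `<mask>` has `lstrip: true`, so it absorbs the space before it. A minimal sketch of loading the tokenizer and confirming these settings, with the repo id again an assumption:

```python
from transformers import AutoTokenizer

# Repo id is an assumption inferred from the committer and model name.
tok = AutoTokenizer.from_pretrained("AhChat/my_awesome_eli5_mlm_model")

print(tok.mask_token, tok.pad_token, tok.model_max_length)  # <mask> </s> 512

# lstrip=true on <mask> means "the <mask>" and "the<mask>" tokenize the
# mask position consistently, matching how RoBERTa was pretrained.
enc = tok("Paris is the <mask> of France.")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
```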
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a0b3eec019a621f3eff1b01ddcab15df20289bbd20dd70205c6090ecacea17a
- size 5137
+ oid sha256:fe083d9e6b74a2ae1f2b7a974d7801ea506eac008641c081c97b9cc4192d9a5e
+ size 5304
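
`training_args.bin` is a pickled `TrainingArguments` object saved by the Trainer. A minimal sketch of inspecting it after downloading; recent PyTorch defaults to `weights_only=True`, so unpickling this file requires explicitly opting out, which is only safe for files you trust:

```python
import torch

# weights_only=False allows unpickling arbitrary classes (here, a
# transformers.TrainingArguments instance); only use on trusted files.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.lr_scheduler_type)
```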
vocab.json ADDED
The diff for this file is too large to render. See raw diff