kimlong22 committed
Commit 0f836e7 · verified · 1 Parent(s): 995369d

Model save

README.md CHANGED
@@ -1,27 +1,27 @@
 ---
-license: apache-2.0
-base_model: google-bert/bert-base-multilingual-cased
+license: mit
+base_model: microsoft/mdeberta-v3-base
 tags:
 - generated_from_trainer
 metrics:
 - precision
 - recall
 model-index:
-- name: lex-cross-encoder-mbert-10neg
+- name: lex-cross-encoder-mdeberta-v3-base-5neg
   results: []
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->

-# lex-cross-encoder-mbert-10neg
+# lex-cross-encoder-mdeberta-v3-base-5neg

-This model is a fine-tuned version of [google-bert/bert-base-multilingual-cased](https://huggingface.co/google-bert/bert-base-multilingual-cased) on an unknown dataset.
+This model is a fine-tuned version of [microsoft/mdeberta-v3-base](https://huggingface.co/microsoft/mdeberta-v3-base) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.4360
-- Precision: 0.6020
-- Recall: 0.8593
-- F2: 0.7917
+- Loss: 0.6811
+- Precision: 0.2
+- Recall: 1.0
+- F2: 0.5556

 ## Model description

@@ -40,38 +40,30 @@
 ### Training hyperparameters

 The following hyperparameters were used during training:
-- learning_rate: 1e-05
-- train_batch_size: 16
-- eval_batch_size: 16
+- learning_rate: 1e-06
+- train_batch_size: 8
+- eval_batch_size: 8
 - seed: 42
 - distributed_type: multi-GPU
 - num_devices: 8
-- total_train_batch_size: 128
-- total_eval_batch_size: 128
+- total_train_batch_size: 64
+- total_eval_batch_size: 64
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
-- num_epochs: 10
+- num_epochs: 2

 ### Training results

-| Training Loss | Epoch | Step  | Validation Loss | Precision | Recall | F2     |
-|:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|
-| 0.4572        | 1.0   | 2317  | 0.4705          | 0.4735    | 0.8620 | 0.7405 |
-| 0.4283        | 2.0   | 4634  | 0.4515          | 0.4774    | 0.9124 | 0.7718 |
-| 0.4115        | 3.0   | 6951  | 0.4485          | 0.4796    | 0.9201 | 0.7773 |
-| 0.4021        | 4.0   | 9268  | 0.4387          | 0.5217    | 0.9068 | 0.7902 |
-| 0.3918        | 5.0   | 11585 | 0.4466          | 0.6111    | 0.8242 | 0.7705 |
-| 0.3879        | 6.0   | 13902 | 0.4337          | 0.5783    | 0.8767 | 0.7947 |
-| 0.383         | 7.0   | 16219 | 0.4336          | 0.5633    | 0.8907 | 0.7980 |
-| 0.3781        | 8.0   | 18536 | 0.4354          | 0.5929    | 0.8660 | 0.7930 |
-| 0.3767        | 9.0   | 20853 | 0.4353          | 0.5980    | 0.8636 | 0.7931 |
-| 0.3712        | 10.0  | 23170 | 0.4360          | 0.6020    | 0.8593 | 0.7917 |
+| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F2     |
+|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|
+| 0.7455        | 1.0   | 1    | 0.6810          | 0.2       | 1.0    | 0.5556 |
+| 0.7455        | 2.0   | 2    | 0.6811          | 0.2       | 1.0    | 0.5556 |


 ### Framework versions

 - Transformers 4.39.1
-- Pytorch 2.5.1+cu121
+- Pytorch 2.6.0+cu124
 - Datasets 3.6.0
 - Tokenizers 0.15.2
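
Note: the card reports an F2 metric but not the code that computes it. Both versions' numbers are consistent with the standard F-beta score at beta = 2, which treats recall as twice as important as precision. The sketch below is a reconstruction under that assumption, not the repository's actual `compute_metrics`.

```python
# Hypothetical reconstruction of the F2 metric reported in the card.
def f_beta(precision: float, recall: float, beta: float = 2.0) -> float:
    """F-beta = (1 + beta^2) * P * R / (beta^2 * P + R)."""
    if precision == 0.0 and recall == 0.0:
        return 0.0
    b2 = beta * beta
    return (1.0 + b2) * precision * recall / (b2 * precision + recall)

print(round(f_beta(0.2, 1.0), 4))        # 0.5556 -- matches the new card
print(round(f_beta(0.6020, 0.8593), 4))  # ~0.7916 vs 0.7917 in the old card
                                         # (inputs above are already rounded)
```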
final_model/config.json CHANGED
@@ -1,50 +1,35 @@
 {
-  "_name_or_path": "Alibaba-NLP/gte-multilingual-reranker-base",
+  "_name_or_path": "microsoft/mdeberta-v3-base",
   "architectures": [
-    "NewForSequenceClassification"
+    "DebertaV2ForSequenceClassification"
   ],
-  "attention_probs_dropout_prob": 0.0,
-  "auto_map": {
-    "AutoConfig": "Alibaba-NLP/new-impl--configuration.NewConfig",
-    "AutoModel": "Alibaba-NLP/new-impl--modeling.NewModel",
-    "AutoModelForMaskedLM": "Alibaba-NLP/new-impl--modeling.NewForMaskedLM",
-    "AutoModelForMultipleChoice": "Alibaba-NLP/new-impl--modeling.NewForMultipleChoice",
-    "AutoModelForQuestionAnswering": "Alibaba-NLP/new-impl--modeling.NewForQuestionAnswering",
-    "AutoModelForSequenceClassification": "Alibaba-NLP/new-impl--modeling.NewForSequenceClassification",
-    "AutoModelForTokenClassification": "Alibaba-NLP/new-impl--modeling.NewForTokenClassification"
-  },
-  "classifier_dropout": 0.0,
+  "attention_probs_dropout_prob": 0.1,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
-  "id2label": {
-    "0": "LABEL_0"
-  },
   "initializer_range": 0.02,
   "intermediate_size": 3072,
-  "label2id": {
-    "LABEL_0": 0
-  },
-  "layer_norm_eps": 1e-12,
-  "layer_norm_type": "layer_norm",
-  "logn_attention_clip1": false,
-  "logn_attention_scale": false,
-  "max_position_embeddings": 8192,
-  "model_type": "new",
+  "layer_norm_eps": 1e-07,
+  "max_position_embeddings": 512,
+  "max_relative_positions": -1,
+  "model_type": "deberta-v2",
+  "norm_rel_ebd": "layer_norm",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pack_qkv": true,
-  "pad_token_id": 1,
-  "position_embedding_type": "rope",
-  "rope_scaling": {
-    "factor": 8.0,
-    "type": "ntk"
-  },
-  "rope_theta": 20000,
+  "pad_token_id": 0,
+  "pooler_dropout": 0,
+  "pooler_hidden_act": "gelu",
+  "pooler_hidden_size": 768,
+  "pos_att_type": [
+    "p2c",
+    "c2p"
+  ],
+  "position_biased_input": false,
+  "position_buckets": 256,
+  "relative_attention": true,
+  "share_att_key": true,
   "torch_dtype": "float32",
   "transformers_version": "4.39.1",
-  "type_vocab_size": 1,
-  "unpad_inputs": false,
-  "use_memory_efficient_attention": false,
-  "vocab_size": 250048
+  "type_vocab_size": 0,
+  "vocab_size": 251000
 }
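
Note: the new config is a stock `DebertaV2ForSequenceClassification` head on `microsoft/mdeberta-v3-base`, replacing the previous custom `new`/GTE architecture, so no `auto_map` or `trust_remote_code` is needed. A minimal usage sketch, assuming the checkpoint is published as `kimlong22/lex-cross-encoder-mdeberta-v3-base-5neg` (a repo id inferred from the committer and model name, not confirmed by this diff):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Assumed repo id; alternatively point at the local final_model/ directory.
model_id = "kimlong22/lex-cross-encoder-mdeberta-v3-base-5neg"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id).eval()

# A cross-encoder encodes the (query, passage) pair jointly in one forward pass.
inputs = tokenizer(
    "example legal query",        # placeholder query
    "example candidate passage",  # placeholder passage
    truncation=True,
    max_length=512,  # max_position_embeddings in the new config
    return_tensors="pt",
)
with torch.no_grad():
    logits = model(**inputs).logits
# Map logits to a relevance score (sigmoid for one label, softmax for two),
# depending on how the head was trained; the card does not say which.
print(logits)
```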
final_model/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0d096a2bf9e6d3723ca116fc08bcd30246856f821d5f5723dcbc067101e4b20c
-size 1223854204
+oid sha256:89c03989f75d0aa4bd09aa494db2abf9fdb0699814577f09a2b8630feeff3136
+size 1115268200
final_model/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:faf4985ae8e249f909a317e50716aa5e88a5b360258c73b674d86bb7fe61a012
-size 5176
+oid sha256:8c35a0a052756eed7c3a8019317fe0fdeda73d19c1f4811fdf8efcef22ec1a7d
+size 5048
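
Note: the `model.safetensors` and `training_args.bin` diffs above change Git LFS pointer files, not the binaries themselves: `oid` is the SHA-256 of the actual blob and `size` its byte length. A small sketch (hypothetical helper, not part of the repo) for checking a downloaded file against its pointer:

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its SHA-256 hex digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Should print 89c03989f75d0aa4bd09aa494db2abf9fdb0699814577f09a2b8630feeff3136
# (the oid in the new pointer) for the updated checkpoint.
print(lfs_sha256("final_model/model.safetensors"))
```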