JunYK committed · verified
Commit 9f7d478 · 1 Parent(s): 238575f

JunYK/bert-b.m.c-STT-Summariz-classification-v3

README.md CHANGED

@@ -1,7 +1,7 @@
 ---
 library_name: peft
-license: apache-2.0
-base_model: bert-base-multilingual-cased
+license: cc-by-sa-4.0
+base_model: klue/bert-base
 tags:
 - generated_from_trainer
 model-index:
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # results
 
-This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset.
+This model is a fine-tuned version of [klue/bert-base](https://huggingface.co/klue/bert-base) on an unknown dataset.
 
 ## Model description
 
@@ -33,14 +33,14 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 1e-05
-- train_batch_size: 16
-- eval_batch_size: 16
+- learning_rate: 3e-05
+- train_batch_size: 8
+- eval_batch_size: 8
 - seed: 42
 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
-- lr_scheduler_type: cosine
-- lr_scheduler_warmup_steps: 100
-- num_epochs: 15
+- lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 20
+- num_epochs: 10
 
 ### Training results
 
@@ -50,5 +50,5 @@ The following hyperparameters were used during training:
 
 - PEFT 0.14.0
 - Transformers 4.48.3
-- Pytorch 2.5.1+cu124
-- Tokenizers 0.21.0
+- Pytorch 2.6.0+cu124
+- Tokenizers 0.21.1
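For context, a minimal sketch of how the updated hyperparameters in this README would map onto a `transformers` `TrainingArguments` object; the `output_dir` and any model/dataset wiring are assumptions for illustration, not part of this commit:

```python
from transformers import TrainingArguments

# Sketch only: mirrors the values listed in the updated README.
training_args = TrainingArguments(
    output_dir="results",            # placeholder; README heading is "results"
    learning_rate=3e-5,              # was 1e-5
    per_device_train_batch_size=8,   # was 16
    per_device_eval_batch_size=8,    # was 16
    seed=42,
    optim="adamw_torch",             # OptimizerNames.ADAMW_TORCH
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",      # was cosine
    warmup_steps=20,                 # was 100
    num_train_epochs=10,             # was 15
)
```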
adapter_config.json CHANGED

@@ -4,7 +4,7 @@
     "base_model_class": "BertForSequenceClassification",
     "parent_library": "transformers.models.bert.modeling_bert"
   },
-  "base_model_name_or_path": "bert-base-multilingual-cased",
+  "base_model_name_or_path": "klue/bert-base",
   "bias": "none",
   "eva_config": null,
   "exclude_modules": null,
@@ -15,19 +15,19 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 64,
+  "lora_alpha": 32,
   "lora_bias": false,
   "lora_dropout": 0.1,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 32,
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "query",
-    "value"
+    "value",
+    "query"
   ],
   "task_type": null,
   "use_dora": false,
adapter_model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:69e1fe80cb82359c3d976db9d3da38e2eb725a67418a6426d211d84f6da3522a
-size 4725384
+oid sha256:33df96da1162d01331904fcea80a98d4f1a9791df2e4fc6dd9b9827eac3d4f1a
+size 1186328
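The drop in adapter size, from 4,725,384 to 1,186,328 bytes, is consistent with the rank change from 32 to 8. A rough check, assuming fp32 adapter weights, 12 encoder layers, hidden size 768, and LoRA on query and value only (the small remainder is the safetensors header and metadata):

```python
# Back-of-the-envelope estimate of the LoRA adapter file size.
hidden, layers, targets, bytes_per_param = 768, 12, 2, 4

def lora_bytes(r: int) -> int:
    # Each target module gets A (r x hidden) and B (hidden x r): 2 * r * hidden params.
    params = 2 * r * hidden * layers * targets
    return params * bytes_per_param

print(lora_bytes(32))  # 4,718,592 bytes -> old file was 4,725,384
print(lora_bytes(8))   # 1,179,648 bytes -> new file is  1,186,328
```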
training_args.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ca8e4a2408b57e196ba7a46dfd30b33797ef66c307d93c83122e9dd5f256fb1
+oid sha256:da95ffa226d6aa2bd6d2647d73488b8b326b19382c7992280b238c62807f6582
 size 5240