mschonhardt committed · Commit b8220b3 · verified · 1 Parent(s): 0de80cb

Model save
README.md CHANGED
@@ -18,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [google/byt5-base](https://huggingface.co/google/byt5-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.1012
+ - Loss: 0.0313
 
  ## Model description
 
@@ -37,25 +37,26 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 5e-05
+ - learning_rate: 3e-05
  - train_batch_size: 8
  - eval_batch_size: 8
  - seed: 42
  - gradient_accumulation_steps: 2
  - total_train_batch_size: 16
  - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- - lr_scheduler_type: linear
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
  - num_epochs: 5
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:-----:|:----:|:---------------:|
- | 0.3238        | 1.0   | 971  | 0.1668          |
- | 0.2426        | 2.0   | 1942 | 0.1217          |
- | 0.2278        | 3.0   | 2913 | 0.1152          |
- | 0.2076        | 4.0   | 3884 | 0.1062          |
- | 0.1946        | 5.0   | 4855 | 0.1012          |
+ | 0.2915        | 1.0   | 971  | 0.1431          |
+ | 0.1664        | 2.0   | 1942 | 0.0804          |
+ | 0.0931        | 3.0   | 2913 | 0.0365          |
+ | 0.0768        | 4.0   | 3884 | 0.0319          |
+ | 0.076         | 5.0   | 4855 | 0.0313          |
 
 
  ### Framework versions
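
The hyperparameters on the new side of the diff map onto a transformers training setup roughly like the sketch below. The use of Seq2SeqTrainingArguments, the per-epoch evaluation, and the output_dir name are assumptions for illustration, not taken from this commit:

```python
# Minimal sketch of training arguments matching the updated README
# (assumption: transformers Seq2SeqTrainingArguments; output_dir is hypothetical).
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="byt5-base-lora",        # hypothetical name
    learning_rate=3e-5,                 # was 5e-5 in the previous revision
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,      # effective train batch size 8 * 2 = 16
    num_train_epochs=5,
    lr_scheduler_type="cosine",         # was "linear" before this commit
    warmup_ratio=0.1,                   # warmup newly added in this commit
    optim="adamw_torch",                # AdamW, betas=(0.9, 0.999), eps=1e-8 (defaults)
    seed=42,
    eval_strategy="epoch",              # assumption; "evaluation_strategy" on older transformers
)
```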
adapter_config.json CHANGED
@@ -13,7 +13,7 @@
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
- "lora_alpha": 32,
+ "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
@@ -21,12 +21,14 @@
  "modules_to_save": null,
  "peft_type": "LORA",
  "qalora_group_size": 16,
- "r": 16,
+ "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
+ "o",
  "q",
- "v"
+ "v",
+ "k"
  ],
  "task_type": "SEQ_2_SEQ_LM",
  "trainable_token_indices": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3da6f1cf1a94b3131627dda9b89e692d2ccab3ea11b285398a3facfa546f22a4
- size 8864192
+ oid sha256:21fe614b7a89d08829e033dca2e81c895db46dfe8d6f5c2d0cd09cf4ede9c6a9
+ size 35423448
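
The adapter checkpoint grows from about 8.9 MB to about 35.4 MB, roughly the 4x increase expected when the rank doubles and the number of targeted projections doubles; a quick back-of-the-envelope check (assuming all four targeted attention projections contribute LoRA weights of comparable size):

```python
# Sanity check on the adapter size change recorded in the LFS pointers.
old_size = 8_864_192    # bytes, r=16, target_modules=["q", "v"]
new_size = 35_423_448   # bytes, r=32, target_modules=["o", "q", "v", "k"]
print(new_size / old_size)  # ~4.0: 2x from doubling r, 2x from doubling target modules
```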
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e0c214c005419021773232efae913c409aa7202e8e7e16875d75f407e83f7a8
+ oid sha256:16f042ff3cdbede964873f42ac6dc80e1d4ff2b4c66c11330b0531eb8ee392ef
  size 5969