Model save

Files changed:

- README.md +3 -3
- adapter_config.json +2 -0
- adapter_model.safetensors +1 -1
- train.log +0 -0
- training_args.bin +2 -2
README.md
CHANGED

@@ -38,10 +38,10 @@ The following hyperparameters were used during training:
 - eval_batch_size: 1
 - seed: 42
 - distributed_type: multi-GPU
-- num_devices:
-- gradient_accumulation_steps:
+- num_devices: 4
+- gradient_accumulation_steps: 8
 - total_train_batch_size: 32
-- total_eval_batch_size:
+- total_eval_batch_size: 4
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.05
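The updated values make the README's batch-size arithmetic check out: with 4 devices and 8 gradient-accumulation steps, a total train batch size of 32 implies a per-device train batch size of 1, and the total eval batch size is eval_batch_size × num_devices = 1 × 4 = 4. A minimal sanity-check sketch (the per-device train batch size of 1 is inferred from these numbers, not stated in the diff):

```python
# Effective batch sizes as computed by the Hugging Face Trainer.
num_devices = 4
gradient_accumulation_steps = 8
per_device_train_batch_size = 1  # inferred: 32 / (4 * 8)
per_device_eval_batch_size = 1   # "eval_batch_size: 1" in the README

total_train_batch_size = (
    per_device_train_batch_size * num_devices * gradient_accumulation_steps
)
total_eval_batch_size = per_device_eval_batch_size * num_devices

assert total_train_batch_size == 32  # matches "total_train_batch_size: 32"
assert total_eval_batch_size == 4    # matches "total_eval_batch_size: 4"
```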
adapter_config.json
CHANGED

@@ -12,6 +12,8 @@
   "modules_to_save": [
     "connector",
     "mm_projector",
+    "response_head",
+    "related_head",
     "lm_head",
     "informative_head",
     "relevance_head",
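In PEFT, `modules_to_save` names modules that are trained and checkpointed in full alongside the LoRA adapter, rather than being replaced with low-rank updates; this commit adds the new `response_head` and `related_head` heads to that list. A hedged sketch of the corresponding config (the `r`, `lora_alpha`, and `target_modules` values are illustrative assumptions, not taken from this repo):

```python
from peft import LoraConfig

config = LoraConfig(
    r=16,                                 # assumed rank
    lora_alpha=32,                        # assumed scaling
    target_modules=["q_proj", "v_proj"],  # assumed LoRA targets
    # Trained and saved in full, per this commit's adapter_config.json:
    modules_to_save=[
        "connector",
        "mm_projector",
        "response_head",  # added in this commit
        "related_head",   # added in this commit
        "lm_head",
        "informative_head",
        "relevance_head",
    ],
)
```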
adapter_model.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5477f4dfacc81f6e31f056965a0c432039a9bbc8fac4b6c7dfb8c5422b215cbd
 size 1204780872
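The weights file itself lives in Git LFS, so the repository only versions a three-line pointer: the spec version, the blob's SHA-256, and its size in bytes (here roughly 1.2 GB). A minimal sketch of checking a downloaded blob against such a pointer, using hypothetical local paths:

```python
import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a local file against a Git LFS pointer's oid and size."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    actual_size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
            actual_size += len(chunk)
    return digest.hexdigest() == expected_oid and actual_size == expected_size

# Hypothetical usage:
# verify_lfs_pointer("adapter_model.safetensors.pointer",
#                    "adapter_model.safetensors")
```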
train.log
CHANGED

The diff for this file is too large to render; see the raw diff.
training_args.bin
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1852aba2ace7e3641ffaab3c86d119cebb4b6301c7f8879296ca7eb3878f113a
+size 7672
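`training_args.bin` is the pickled `TrainingArguments` object that `transformers.Trainer` writes next to the model; its small size (7672 bytes) reflects that it holds configuration, not weights. A sketch of inspecting it, assuming the checkpoint was produced by the Hugging Face Trainer:

```python
import torch

# A pickled TrainingArguments object; transformers must be installed,
# and weights_only=False is required on recent PyTorch to unpickle it.
args = torch.load("training_args.bin", weights_only=False)

print(args.per_device_eval_batch_size)   # expect 1, per the README
print(args.gradient_accumulation_steps)  # expect 8
print(args.lr_scheduler_type)            # expect "cosine"
print(args.warmup_ratio)                 # expect 0.05
```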