atomwalk12 committed (verified)
Commit 1222850 · Parent(s): 58bb15a

Model save

adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0c172a2283c02a6a082d0959e01eaf1d93f8dec431719d826fbef8d5a0f4af0c
+oid sha256:00d5b48881ccefb885f38b035d6f5b2ad9720a70d18b3b29a72a238e809fc964
 size 69782384
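Only the Git LFS pointer changes here: the SHA-256 object ID now references the newly saved adapter weights, while the payload size (69,782,384 bytes) is unchanged, which is consistent with the same adapter being re-saved with updated values. A minimal sketch for checking a locally downloaded copy against the new pointer (the local file path is an assumption):

import hashlib

# Assumed local path to the downloaded LFS object; adjust as needed.
ADAPTER_PATH = "adapter_model.safetensors"
EXPECTED_OID = "00d5b48881ccefb885f38b035d6f5b2ad9720a70d18b3b29a72a238e809fc964"
EXPECTED_SIZE = 69782384

digest = hashlib.sha256()
size = 0
with open(ADAPTER_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        digest.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"unexpected size: {size}"
assert digest.hexdigest() == EXPECTED_OID, "file does not match the committed LFS pointer"
print("adapter_model.safetensors matches the new pointer")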
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "total_flos": 1052153750937600.0,
-    "train_loss": 0.0,
-    "train_runtime": 1.9712,
-    "train_samples": 5,
-    "train_samples_per_second": 12.682,
-    "train_steps_per_second": 2.536
+    "total_flos": 952828382195712.0,
+    "train_loss": 0.985106372833252,
+    "train_runtime": 27.7768,
+    "train_samples": 4,
+    "train_samples_per_second": 0.72,
+    "train_steps_per_second": 0.18
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "total_flos": 1052153750937600.0,
-    "train_loss": 0.0,
-    "train_runtime": 1.9712,
-    "train_samples": 5,
-    "train_samples_per_second": 12.682,
-    "train_steps_per_second": 2.536
+    "total_flos": 952828382195712.0,
+    "train_loss": 0.985106372833252,
+    "train_runtime": 27.7768,
+    "train_samples": 4,
+    "train_samples_per_second": 0.72,
+    "train_steps_per_second": 0.18
 }
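train_results.json carries the same metrics as all_results.json; in the standard transformers example scripts both files are written by trainer.save_metrics("train", metrics), though that is an assumption about how this checkpoint was produced rather than something the diff shows. The new aggregate numbers are internally consistent with a 5-epoch run over 4 samples (5 optimizer steps) taking 27.7768 s:

# Consistency check of the new throughput metrics, using only values visible in the diff.
train_runtime = 27.7768     # seconds
train_samples = 4           # "train_samples" in all_results.json / train_results.json
num_epochs = 5              # epochs 1.0 through 5.0 in trainer_state.json
optimizer_steps = 5         # final "step": 5 in trainer_state.json

samples_per_second = train_samples * num_epochs / train_runtime
steps_per_second = optimizer_steps / train_runtime

print(round(samples_per_second, 2))  # 0.72 -> "train_samples_per_second"
print(round(steps_per_second, 2))    # 0.18 -> "train_steps_per_second"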
trainer_state.json CHANGED
@@ -11,52 +11,52 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "grad_norm": 2.7632174491882324,
+      "grad_norm": 4.060195446014404,
       "learning_rate": 0.0,
-      "loss": 0.8619,
-      "num_tokens": 22235.0,
+      "loss": 1.0285,
+      "num_tokens": 19000.0,
       "step": 1
     },
     {
       "epoch": 2.0,
-      "grad_norm": 2.750493288040161,
+      "grad_norm": 4.069539546966553,
       "learning_rate": 4e-05,
-      "loss": 0.8617,
-      "num_tokens": 44470.0,
+      "loss": 1.0285,
+      "num_tokens": 38000.0,
       "step": 2
     },
     {
       "epoch": 3.0,
-      "grad_norm": 2.421985626220703,
+      "grad_norm": 3.7945613861083984,
       "learning_rate": 3.472792206135786e-05,
-      "loss": 0.839,
-      "num_tokens": 66705.0,
+      "loss": 0.9928,
+      "num_tokens": 57000.0,
       "step": 3
     },
     {
       "epoch": 4.0,
-      "grad_norm": 2.039574384689331,
+      "grad_norm": 3.1087138652801514,
       "learning_rate": 2.2000000000000003e-05,
-      "loss": 0.807,
-      "num_tokens": 88940.0,
+      "loss": 0.9576,
+      "num_tokens": 76000.0,
       "step": 4
     },
     {
       "epoch": 5.0,
-      "grad_norm": 1.7994582653045654,
+      "grad_norm": 2.892008066177368,
       "learning_rate": 9.272077938642147e-06,
-      "loss": 0.7828,
-      "num_tokens": 111175.0,
+      "loss": 0.9181,
+      "num_tokens": 95000.0,
       "step": 5
     },
     {
       "epoch": 5.0,
       "step": 5,
-      "total_flos": 1052153750937600.0,
-      "train_loss": 0.0,
-      "train_runtime": 1.9712,
-      "train_samples_per_second": 12.682,
-      "train_steps_per_second": 2.536
+      "total_flos": 952828382195712.0,
+      "train_loss": 0.985106372833252,
+      "train_runtime": 27.7768,
+      "train_samples_per_second": 0.72,
+      "train_steps_per_second": 0.18
     }
   ],
   "logging_steps": 1,
@@ -76,7 +76,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1052153750937600.0,
+  "total_flos": 952828382195712.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null