RealDragonMA committed
Commit 0c441a7 · verified · 1 Parent(s): 3126bf0

Model save
README.md CHANGED
@@ -27,17 +27,17 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/realdragonma-pelliculum/huggingface/runs/gucz3wwp)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/realdragonma-pelliculum/huggingface/runs/wcdoae50)
 
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.14.0
-- Transformers: 4.48.3
+- TRL: 0.15.0
+- Transformers: 4.49.0
 - Pytorch: 2.6.0+cu124
-- Datasets: 3.2.0
+- Datasets: 3.3.1
 - Tokenizers: 0.21.0
 
 ## Citations
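The README records that this model was trained with SFT using TRL 0.15.0 and logs to Weights & Biases. As a minimal sketch of what such a run typically looks like with these framework versions, assuming a placeholder base model and dataset (neither is named in this commit):

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# Hypothetical dataset and base model; this commit does not name either.
dataset = load_dataset("trl-lib/Capybara", split="train")

training_args = SFTConfig(
    output_dir="sft-output",
    report_to="wandb",  # produces a W&B run like the badge linked above
)
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",  # placeholder base model
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```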
final_checkpoint/adapter_config.json CHANGED
@@ -23,10 +23,10 @@
     "rank_pattern": {},
     "revision": null,
     "target_modules": [
+        "k_proj",
         "q_proj",
         "o_proj",
-        "v_proj",
-        "k_proj"
+        "v_proj"
     ],
     "task_type": "CAUSAL_LM",
     "use_dora": false,
final_checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7c0d86cfce92d8c1604bab7d4c9cc8c90cf852c6a21ec3bf0e5b294ed2ff9a2
+oid sha256:bf7f8b223f35118ded6f308ce0488d12a93f556f0cc1e31b007ce46ae8940916
 size 29523136
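This file is a Git LFS pointer rather than the weights themselves: only the sha256 oid changed, while the payload size stayed at 29,523,136 bytes (about 29.5 MB, consistent with a LoRA adapter rather than full model weights). A quick sketch for verifying a downloaded file against the pointer's oid, assuming a local checkout:

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream the file so large checkpoints don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# oid taken from the new pointer above; the path assumes a local clone
expected = "bf7f8b223f35118ded6f308ce0488d12a93f556f0cc1e31b007ce46ae8940916"
assert sha256_of("final_checkpoint/adapter_model.safetensors") == expected
```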
final_checkpoint/tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
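The tokenizer no longer bakes truncation into its serialized config: "truncation": null means inputs are not cut to 1024 tokens unless the caller asks for it. A sketch of what the before and after states mean through the tokenizers API (the file path is the one from this commit):

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("final_checkpoint/tokenizer.json")

# The old config is equivalent to enabling truncation like this:
tok.enable_truncation(
    max_length=1024,
    strategy="longest_first",
    stride=0,
    direction="right",
)

# The new config ("truncation": null) corresponds to:
tok.no_truncation()
```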
final_checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:468d6144abe817c443d42dc9f38248e1b7dacc79a09c721a19ae706c7c779e73
-size 5688
+oid sha256:19ffad974e6c86ba8d10a67fd65eeaea81497bd8351104df21e2567e618973a0
+size 5624
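training_args.bin is a pickled TrainingArguments object, which is why its size shifts slightly (5688 → 5624 bytes) across the library upgrade. A sketch of inspecting it from a local checkout; note that PyTorch 2.6, the version pinned in the README, defaults torch.load to weights_only=True, which rejects pickled objects:

```python
import torch

# weights_only=False is required for pickled objects under PyTorch 2.6;
# only do this for files you trust, since unpickling executes code.
args = torch.load("final_checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)
```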