atomwalk12 committed · Commit fa3dbfc · verified · 1 Parent(s): b0ab316

Model save

README.md CHANGED
@@ -4,8 +4,8 @@ library_name: transformers
 model_name: Qwen3-1.7B-Instruct-SFT
 tags:
 - generated_from_trainer
-- sft
 - trl
+- sft
 licence: license
 ---
 
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/walks/huggingface/runs/h2za7yxg)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/walks/huggingface/runs/5s2ms8vh)
 
 
 This model was trained with SFT.
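The README hunk header references `print(output["generated_text"])`, which suggests the card's usage snippet runs the model through the standard transformers text-generation pipeline. A minimal sketch of that kind of call follows; the repo id and prompt are assumptions, not taken from this commit.

```python
# Sketch of the usage implied by the README's hunk header; the repo id and
# prompt are placeholders (assumptions), not values shown in this diff.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="atomwalk12/Qwen3-1.7B-Instruct-SFT",  # assumed repo id
)
output = generator(
    [{"role": "user", "content": "What is supervised fine-tuning?"}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```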
adapter_config.json CHANGED
@@ -25,13 +25,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "v_proj",
+    "o_proj",
     "gate_proj",
-    "k_proj",
     "down_proj",
-    "v_proj",
-    "up_proj",
     "q_proj",
-    "o_proj"
+    "k_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 952828382195712.0,
     "train_loss": 0.0,
-    "train_runtime": 8.0549,
+    "train_runtime": 31.9031,
     "train_samples": 4,
-    "train_samples_per_second": 2.483,
-    "train_steps_per_second": 0.621
+    "train_samples_per_second": 0.627,
+    "train_steps_per_second": 0.157
 }
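The updated throughput numbers are internally consistent with a 5-step run over the 4 training samples (roughly 20 samples processed in total, i.e. about 5 epochs). A quick check, assuming the usual Trainer definitions of these metrics:

```python
# Consistency check, assuming samples_per_second = samples_processed / runtime
# and steps_per_second = steps / runtime. The step count (5) comes from
# trainer_state.json below; "~20 samples processed" is inferred, not logged.
train_runtime = 31.9031
steps = 5
samples_processed = train_runtime * 0.627   # ~20.0 -> 4 samples x ~5 epochs
print(round(samples_processed, 1))          # 20.0
print(round(steps / train_runtime, 3))      # 0.157, matches train_steps_per_second
```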
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 952828382195712.0,
     "train_loss": 0.0,
-    "train_runtime": 8.0549,
+    "train_runtime": 31.9031,
     "train_samples": 4,
-    "train_samples_per_second": 2.483,
-    "train_steps_per_second": 0.621
+    "train_samples_per_second": 0.627,
+    "train_steps_per_second": 0.157
 }
trainer_state.json CHANGED
@@ -54,9 +54,9 @@
       "step": 5,
       "total_flos": 952828382195712.0,
       "train_loss": 0.0,
-      "train_runtime": 8.0549,
-      "train_samples_per_second": 2.483,
-      "train_steps_per_second": 0.621
+      "train_runtime": 31.9031,
+      "train_samples_per_second": 0.627,
+      "train_steps_per_second": 0.157
     }
   ],
   "logging_steps": 1,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b66e38d92de6361d941bf3307d9d3c6785e81b8963710d69a4534bdfe8cf6d09
+oid sha256:62397a1e60f6bbe5d7ac28c5b215abc6fb95c0c3cc6d436a1514afc253f61a05
 size 6673
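training_args.bin is tracked with Git LFS, so this diff only changes the pointer file; the actual binary is addressed by the sha256 oid above. A sketch of checking a downloaded copy against the new oid, assuming the file sits in the current directory:

```python
# Sketch: verify a downloaded training_args.bin against the new LFS pointer's
# sha256 oid. The local path is an assumption about where the file lives.
import hashlib

with open("training_args.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

expected = "62397a1e60f6bbe5d7ac28c5b215abc6fb95c0c3cc6d436a1514afc253f61a05"
print(digest == expected)
```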