WangXFng committed

Commit 5dd5ded · verified · 1 Parent(s): 6dd3a52

Model save
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
+base_model: meta-llama/Llama-3.2-3B-Instruct
 library_name: peft
 license: llama3.2
-base_model: meta-llama/Llama-3.2-3B-Instruct
 tags:
 - generated_from_trainer
 model-index:
@@ -33,14 +33,15 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 5e-05
+- learning_rate: 0.0001
 - train_batch_size: 16
 - eval_batch_size: 8
 - seed: 42
 - gradient_accumulation_steps: 16
 - total_train_batch_size: 256
-- optimizer: Use adamw_hf with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 2
 - num_epochs: 4
 
 ### Training results
@@ -49,7 +50,7 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- PEFT 0.14.0
-- Transformers 4.47.1
-- Pytorch 2.5.1
-- Tokenizers 0.21.0
+- PEFT 0.13.0
+- Transformers 4.45.2
+- Pytorch 2.4.0
+- Tokenizers 0.20.0
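The new hyperparameter list maps directly onto transformers' `TrainingArguments`. A minimal sketch of the equivalent configuration, assuming a standard `Trainer` run — the `output_dir` is hypothetical, `logging_steps=250` is taken from trainer_state.json below, and everything else is read from the card:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="llama-3.2-3b-lora",   # hypothetical path
    learning_rate=1e-4,               # learning_rate: 0.0001
    per_device_train_batch_size=16,   # train_batch_size: 16
    per_device_eval_batch_size=8,     # eval_batch_size: 8
    seed=42,
    gradient_accumulation_steps=16,   # 16 * 16 = 256 total_train_batch_size
    lr_scheduler_type="linear",
    warmup_steps=2,                   # lr_scheduler_warmup_steps: 2
    num_train_epochs=4,
    logging_steps=250,                # matches trainer_state.json below
)
```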
adapter_config.json CHANGED
@@ -3,8 +3,6 @@
   "auto_mapping": null,
   "base_model_name_or_path": "meta-llama/Llama-3.2-3B-Instruct",
   "bias": "none",
-  "eva_config": null,
-  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
@@ -13,7 +11,6 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 32,
-  "lora_bias": false,
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
@@ -24,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "q_proj",
-    "down_proj",
-    "v_proj",
     "k_proj",
-    "up_proj",
     "gate_proj",
-    "o_proj"
+    "v_proj",
+    "down_proj",
+    "o_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f5ccf722379804ea9ef74d4683e1f5161bfd1be3c0e4617a179c17cbcce8874
+oid sha256:e502db4380eb225e5e8e326d092b5164248dec0da88569586775f63d5112a7ab
 size 1684597880
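Only the Git LFS pointer's oid changed here; the payload size is identical. If needed, a downloaded file can be checked against the pointer with a plain SHA-256, as in this sketch (the local filename is an assumption):

```python
import hashlib

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:    # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

# Should match the oid in the new pointer:
# e502db4380eb225e5e8e326d092b5164248dec0da88569586775f63d5112a7ab
print(h.hexdigest())
```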
config.json CHANGED
@@ -33,7 +33,7 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.47.1",
+  "transformers_version": "4.45.2",
   "use_cache": true,
   "vocab_size": 129174
 }
tokenizer_config.json CHANGED
@@ -9397,7 +9397,6 @@
   "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n    {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n    {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n    {%- if strftime_now is defined %}\n        {%- set date_string = strftime_now(\"%d %b %Y\") %}\n    {%- else %}\n        {%- set date_string = \"26 Jul 2024\" %}\n    {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n    {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content']|trim %}\n    {%- set messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if tools is not none %}\n    {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n    {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n    {#- Extract the first user message so we can plug it in here #}\n    {%- if messages | length != 0 %}\n        {%- set first_user_message = messages[0]['content']|trim %}\n        {%- set messages = messages[1:] %}\n    {%- else %}\n        {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n    {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n    {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n    {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n    {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n    {%- elif 'tool_calls' in message %}\n        {%- if not message.tool_calls|length == 1 %}\n            {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n        {%- endif %}\n        {%- set tool_call = message.tool_calls[0].function %}\n        {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n        {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n        {{- '\"parameters\": ' }}\n        {{- tool_call.arguments | tojson }}\n        {{- \"}\" }}\n        {{- \"<|eot_id|>\" }}\n    {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n        {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n        {%- if message.content is mapping or message.content is iterable %}\n            {{- message.content | tojson }}\n        {%- else %}\n            {{- message.content }}\n        {%- endif %}\n        {{- \"<|eot_id|>\" }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|eot_id|>",
-  "extra_special_tokens": {},
   "model_input_names": [
     "input_ids",
     "attention_mask"
trainer_state.json CHANGED
@@ -10,68 +10,68 @@
   "log_history": [
     {
       "epoch": 0.4854368932038835,
-      "grad_norm": 6.504103183746338,
-      "learning_rate": 4.393203883495146e-05,
-      "loss": 17.9006,
+      "grad_norm": 0.5462909936904907,
+      "learning_rate": 8.794946550048592e-05,
+      "loss": 0.7922,
       "step": 250
     },
     {
       "epoch": 0.970873786407767,
-      "grad_norm": 9.236562728881836,
-      "learning_rate": 3.7864077669902914e-05,
-      "loss": 10.3383,
+      "grad_norm": 0.5513872504234314,
+      "learning_rate": 7.580174927113704e-05,
+      "loss": 0.3621,
       "step": 500
     },
     {
       "epoch": 1.4563106796116505,
-      "grad_norm": 16.256534576416016,
-      "learning_rate": 3.1796116504854373e-05,
-      "loss": 8.7552,
+      "grad_norm": 0.5781261324882507,
+      "learning_rate": 6.365403304178815e-05,
+      "loss": 0.3085,
       "step": 750
     },
     {
       "epoch": 1.941747572815534,
-      "grad_norm": 17.89598846435547,
-      "learning_rate": 2.5728155339805826e-05,
-      "loss": 7.8554,
+      "grad_norm": 0.6140846014022827,
+      "learning_rate": 5.150631681243926e-05,
+      "loss": 0.2823,
       "step": 1000
     },
     {
       "epoch": 2.4271844660194173,
-      "grad_norm": 16.893712997436523,
-      "learning_rate": 1.9660194174757282e-05,
-      "loss": 7.3315,
+      "grad_norm": 0.5387245416641235,
+      "learning_rate": 3.9358600583090386e-05,
+      "loss": 0.2616,
       "step": 1250
     },
     {
       "epoch": 2.912621359223301,
-      "grad_norm": 13.431181907653809,
-      "learning_rate": 1.3592233009708738e-05,
-      "loss": 7.0833,
+      "grad_norm": 0.5812926292419434,
+      "learning_rate": 2.72108843537415e-05,
+      "loss": 0.252,
       "step": 1500
     },
     {
       "epoch": 3.3980582524271843,
-      "grad_norm": 12.282544136047363,
-      "learning_rate": 7.524271844660194e-06,
-      "loss": 6.9,
+      "grad_norm": 0.538931131362915,
+      "learning_rate": 1.5063168124392615e-05,
+      "loss": 0.2393,
       "step": 1750
     },
     {
       "epoch": 3.883495145631068,
-      "grad_norm": 11.840730667114258,
-      "learning_rate": 1.4563106796116506e-06,
-      "loss": 6.8409,
+      "grad_norm": 0.5031210780143738,
+      "learning_rate": 2.915451895043732e-06,
+      "loss": 0.2342,
       "step": 2000
     },
     {
       "epoch": 4.0,
       "step": 2060,
-      "total_flos": 1.4507840127190487e+18,
-      "train_loss": 9.058904229321525,
-      "train_runtime": 27634.2806,
-      "train_samples_per_second": 19.083,
-      "train_steps_per_second": 0.075
+      "total_flos": 1.4514285460762153e+18,
+      "train_loss": 0.33831017781229855,
+      "train_runtime": 16558.4463,
+      "train_samples_per_second": 31.848,
+      "train_steps_per_second": 0.124
     }
   ],
   "logging_steps": 250,
@@ -91,7 +91,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.4507840127190487e+18,
+  "total_flos": 1.4514285460762153e+18,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6aaad4f213d3e82064031a262f630e6c9942e8059015557322a33d65c1ae8f1a
-size 5304
+oid sha256:00bee88cfff70ceb036e8da2a203c831e95fa4809539bbcfa0257eb67b695712
+size 5240