vpakarinen committed
Commit 18e4f90 · verified · 1 parent: 174aaf1

Upload 10 files
README.md CHANGED
@@ -1,3 +1,133 @@
- ---
- license: mit
- ---
+ ---
+ library_name: peft
+ license: apache-2.0
+ base_model: ibm-granite/granite-3.3-8b-instruct
+ tags:
+ - generated_from_trainer
+ datasets:
+ - ICEPVP8977/Uncensored_Small_Test_Time_Compute
+ model-index:
+ - name: outputs/mymodel
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.8.0.dev0`
+ ```yaml
+ adapter: lora
+ base_model: ibm-granite/granite-3.3-8b-instruct
+ bf16: auto
+ dataset_processes: 32
+ chat_template: alpaca
+ per_device_train_batch_size: 1
+ datasets:
+ - message_property_mappings:
+     content: content
+     role: role
+   path: ICEPVP8977/Uncensored_Small_Test_Time_Compute
+   type: alpaca
+   trust_remote_code: false
+ gradient_accumulation_steps: 1
+ gradient_checkpointing: true
+ learning_rate: 0.0002
+ lisa_layers_attribute: model.layers
+ load_best_model_at_end: false
+ load_in_4bit: true
+ load_in_8bit: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 8
+ lora_target_modules:
+ - q_proj
+ - v_proj
+ - k_proj
+ - o_proj
+ - gate_proj
+ - down_proj
+ - up_proj
+ loraplus_lr_embedding: 1.0e-06
+ lr_scheduler: cosine
+ max_prompt_len: 512
+ mean_resizing_embeddings: false
+ micro_batch_size: 8
+ num_epochs: 1.0
+ optimizer: paged_adamw_8bit
+ output_dir: ./outputs/mymodel
+ pretrain_multipack_attn: true
+ pretrain_multipack_buffer_size: 10000
+ qlora_sharded_model_loading: false
+ ray_num_workers: 1
+ resources_per_worker:
+   GPU: 1
+ sample_packing_bin_size: 200
+ sample_packing_group_size: 100000
+ save_only_model: false
+ save_safetensors: true
+ sequence_len: 4096
+ shuffle_merged_datasets: true
+ skip_prepare_dataset: false
+ strict: false
+ train_on_inputs: false
+ trl:
+   log_completions: false
+   ref_model_mixup_alpha: 0.9
+   ref_model_sync_steps: 64
+   sync_ref_model: false
+   use_vllm: false
+   vllm_device: auto
+   vllm_dtype: auto
+   vllm_gpu_memory_utilization: 0.9
+ use_ray: false
+ val_set_size: 0.0
+ weight_decay: 0.0
+
+ ```
+
+ </details><br>
+
+ # outputs/mymodel
+
+ This model is a fine-tuned version of [ibm-granite/granite-3.3-8b-instruct](https://huggingface.co/ibm-granite/granite-3.3-8b-instruct) on the ICEPVP8977/Uncensored_Small_Test_Time_Compute dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: paged_adamw_8bit (betas=(0.9, 0.999), epsilon=1e-08, no additional optimizer arguments)
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 17
+ - num_epochs: 1.0
+
+ ### Training results
+
+ ### Framework versions
+
+ - PEFT 0.14.0
+ - Transformers 4.49.0
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.2.0
+ - Tokenizers 0.21.0
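Since the usage sections of the card are still "More information needed", here is a minimal inference sketch. The adapter repo id is a placeholder (this commit does not name it), and 4-bit loading mirrors the `load_in_4bit: true` setting in the config above:

```python
# Minimal sketch: attach this LoRA adapter to the 4-bit base model.
# "path/to/this-adapter" is a placeholder for the actual repo id or local dir.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "ibm-granite/granite-3.3-8b-instruct"
adapter_id = "path/to/this-adapter"  # placeholder

model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter_id)
tok = AutoTokenizer.from_pretrained(adapter_id)  # tokenizer files ship with this upload

# Training used the Alpaca format (chat_template: alpaca), so prompt accordingly.
prompt = "### Instruction:\nExplain what a LoRA adapter is.\n\n### Response:\n"
inputs = tok(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128)
print(tok.decode(out[0], skip_special_tokens=True))
```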
adapter_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "ibm-granite/granite-3.3-8b-instruct",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "v_proj",
+     "k_proj",
+     "gate_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
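A quick way to sanity-check these settings is to load them with peft; a small sketch, assuming the adapter files sit in a local `./outputs/mymodel` directory (the `output_dir` from the training config):

```python
# Sketch: load and inspect the LoRA settings recorded above.
from peft import LoraConfig

cfg = LoraConfig.from_pretrained("./outputs/mymodel")  # local dir is an assumption
print(cfg.r, cfg.lora_alpha, cfg.lora_dropout)  # 8 16 0.05
print(sorted(cfg.target_modules))               # all attention + MLP projections
```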
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fab7cd3f5599fc3efca31cd1762b6e400f8fc4cb99f02097574fbe6400a1ec2
+ size 99033416
added_tokens.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "<|end_of_cite|>": 49156,
+   "<|end_of_plugin|>": 49158,
+   "<|end_of_role|>": 49153,
+   "<|start_of_cite|>": 49155,
+   "<|start_of_plugin|>": 49157,
+   "<|start_of_role|>": 49152,
+   "<|tool_call|>": 49154
+ }
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_attn_implementation_autoset": true,
+   "_name_or_path": "ibm-granite/granite-3.3-8b-instruct",
+   "architectures": [
+     "GraniteForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "attention_multiplier": 0.0078125,
+   "bos_token_id": 0,
+   "embedding_multiplier": 12.0,
+   "eos_token_id": 0,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 12800,
+   "logits_scaling": 16.0,
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "granite",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 40,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "quantization_config": {
+     "_load_in_4bit": true,
+     "_load_in_8bit": false,
+     "bnb_4bit_compute_dtype": "float32",
+     "bnb_4bit_quant_storage": "uint8",
+     "bnb_4bit_quant_type": "fp4",
+     "bnb_4bit_use_double_quant": false,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
+   "residual_multiplier": 0.22,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.49.0",
+   "use_cache": false,
+   "vocab_size": 49159
+ }
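Note that the recorded quantization uses the bitsandbytes defaults: `fp4` quantization with a `float32` compute dtype, not the `nf4`/`bfloat16` combination common in QLoRA recipes. A sketch of the equivalent `BitsAndBytesConfig`, if one wants to reproduce the training-time loading exactly:

```python
# Sketch: rebuild the quantization_config embedded in config.json above.
import torch
from transformers import BitsAndBytesConfig

bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="fp4",             # fp4, not nf4, per the config
    bnb_4bit_compute_dtype=torch.float32,  # float32 compute dtype
    bnb_4bit_use_double_quant=False,
)
```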
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "additional_special_tokens": [
+     "<|start_of_role|>",
+     "<|end_of_role|>",
+     "<|tool_call|>",
+     "<|start_of_cite|>",
+     "<|end_of_cite|>",
+     "<|start_of_plugin|>",
+     "<|end_of_plugin|>"
+   ],
+   "bos_token": {
+     "content": "<|end_of_text|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|end_of_text|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|end_of_text|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|end_of_text|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,235 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|end_of_text|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<fim_prefix>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<fim_middle>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<fim_suffix>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "<fim_pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "10": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "11": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "12": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "14": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "15": {
+       "content": "<commit_before>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "16": {
+       "content": "<commit_msg>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "17": {
+       "content": "<commit_after>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "18": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49152": {
+       "content": "<|start_of_role|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49153": {
+       "content": "<|end_of_role|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49154": {
+       "content": "<|tool_call|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49155": {
+       "content": "<|start_of_cite|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49156": {
+       "content": "<|end_of_cite|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49157": {
+       "content": "<|start_of_plugin|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49158": {
+       "content": "<|end_of_plugin|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|start_of_role|>",
+     "<|end_of_role|>",
+     "<|tool_call|>",
+     "<|start_of_cite|>",
+     "<|end_of_cite|>",
+     "<|start_of_plugin|>",
+     "<|end_of_plugin|>"
+   ],
+   "bos_token": "<|end_of_text|>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{% if message['role'] == 'system' and loop.first %}{{ message['content'] }}{% elif message['role'] == 'user' %}{{ '### Instruction:\n' + message['content'] }}{% elif message['role'] == 'assistant' %}{{ '### Response:\n' + message['content'] + eos_token }}{% endif %}{% if not loop.last %}{{ '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n\n### Response:\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|end_of_text|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 9223372036854775807,
+   "pad_token": "<|end_of_text|>",
+   "padding_side": "left",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|end_of_text|>",
+   "vocab_size": 49152
+ }
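The `chat_template` above is the Alpaca format selected by `chat_template: alpaca` in the training config. A short sketch of how it renders (the local path is an assumption):

```python
# Sketch: render one turn with the Alpaca-style template defined above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./outputs/mymodel")  # local dir is an assumption
messages = [{"role": "user", "content": "What is 2 + 2?"}]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <|end_of_text|>### Instruction:
# What is 2 + 2?
#
# ### Response:
```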
vocab.json ADDED
The diff for this file is too large to render. See raw diff