Helw150 committed
Commit 56ef403 · 1 Parent(s): 1a1ab26

llama-8b-tootsie-0.001-19ad63: step-660000

config.json CHANGED
@@ -1 +1 @@
- {"vocab_size": 128256, "max_position_embeddings": 4096, "hidden_size": 4096, "intermediate_size": 14336, "num_hidden_layers": 32, "num_attention_heads": 32, "num_key_value_heads": 8, "hidden_act": "silu", "initializer_range": 0.02, "rms_norm_eps": 1e-05, "pretraining_tp": 1, "use_cache": true, "rope_theta": 10000, "rope_scaling": null, "attention_bias": false, "attention_dropout": 0.0, "mlp_bias": false, "head_dim": 128, "return_dict": true, "output_hidden_states": false, "output_attentions": false, "torchscript": false, "torch_dtype": null, "use_bfloat16": false, "tf_legacy_loss": false, "pruned_heads": {}, "tie_word_embeddings": false, "chunk_size_feed_forward": 0, "is_encoder_decoder": false, "is_decoder": false, "cross_attention_hidden_size": null, "add_cross_attention": false, "tie_encoder_decoder": false, "max_length": 20, "min_length": 0, "do_sample": false, "early_stopping": false, "num_beams": 1, "num_beam_groups": 1, "diversity_penalty": 0.0, "temperature": 1.0, "top_k": 50, "top_p": 1.0, "typical_p": 1.0, "repetition_penalty": 1.0, "length_penalty": 1.0, "no_repeat_ngram_size": 0, "encoder_no_repeat_ngram_size": 0, "bad_words_ids": null, "num_return_sequences": 1, "output_scores": false, "return_dict_in_generate": false, "forced_bos_token_id": null, "forced_eos_token_id": null, "remove_invalid_values": false, "exponential_decay_length_penalty": null, "suppress_tokens": null, "begin_suppress_tokens": [128000, 128001], "architectures": ["LlamaForCausalLM"], "finetuning_task": null, "id2label": {"0": "LABEL_0", "1": "LABEL_1"}, "label2id": {"LABEL_0": 0, "LABEL_1": 1}, "tokenizer_class": null, "prefix": null, "bos_token_id": 128000, "pad_token_id": 128001, "eos_token_id": 128001, "sep_token_id": null, "decoder_start_token_id": 128000, "task_specific_params": null, "problem_type": null, "_name_or_path": "", "_attn_implementation_autoset": false, "transformers_version": "4.46.2", "model_type": "llama"}
+ {"vocab_size": 128256, "max_position_embeddings": 4096, "hidden_size": 4096, "intermediate_size": 14336, "num_hidden_layers": 32, "num_attention_heads": 32, "num_key_value_heads": 8, "hidden_act": "silu", "initializer_range": 0.02, "rms_norm_eps": 1e-05, "pretraining_tp": 1, "use_cache": true, "rope_theta": 10000, "rope_scaling": null, "attention_bias": false, "attention_dropout": 0.0, "mlp_bias": false, "head_dim": 128, "return_dict": true, "output_hidden_states": false, "output_attentions": false, "torchscript": false, "torch_dtype": null, "use_bfloat16": false, "tf_legacy_loss": false, "pruned_heads": {}, "tie_word_embeddings": false, "chunk_size_feed_forward": 0, "is_encoder_decoder": false, "is_decoder": false, "cross_attention_hidden_size": null, "add_cross_attention": false, "tie_encoder_decoder": false, "max_length": 20, "min_length": 0, "do_sample": false, "early_stopping": false, "num_beams": 1, "num_beam_groups": 1, "diversity_penalty": 0.0, "temperature": 1.0, "top_k": 50, "top_p": 1.0, "typical_p": 1.0, "repetition_penalty": 1.0, "length_penalty": 1.0, "no_repeat_ngram_size": 0, "encoder_no_repeat_ngram_size": 0, "bad_words_ids": null, "num_return_sequences": 1, "output_scores": false, "return_dict_in_generate": false, "forced_bos_token_id": null, "forced_eos_token_id": null, "remove_invalid_values": false, "exponential_decay_length_penalty": null, "suppress_tokens": null, "begin_suppress_tokens": [128000, 128001], "architectures": ["LlamaForCausalLM"], "finetuning_task": null, "id2label": {"0": "LABEL_0", "1": "LABEL_1"}, "label2id": {"LABEL_0": 0, "LABEL_1": 1}, "tokenizer_class": null, "prefix": null, "bos_token_id": 128000, "pad_token_id": null, "eos_token_id": 128001, "sep_token_id": null, "decoder_start_token_id": 128000, "task_specific_params": null, "problem_type": null, "_name_or_path": "", "_attn_implementation_autoset": false, "transformers_version": "4.46.2", "model_type": "llama"}
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:90b688b1c68b88de9e4421e5dc911c0893dbb0851d71948018527ac610c22016
+ oid sha256:05dc186bf3ef7de2e109d0a9f5c48d5bea2f51918e1ae68284c79e25ca7b10df
  size 9831465704
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e6b6bb123d65d25c87027205188a59df3cec0f47f10b59f96d68fc6dab7d2e36
+ oid sha256:3bd207ebb641fc2a6374997f48f27fa5a0914e41ead683def80d9a462a2f15b3
  size 9865007800
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9197f5cbcb3623cfc809c3a297f050ed9baa0e9af61f6385e1b3ce3e660f3549
+ oid sha256:fb5c4b1476ac64bdaa37c87d7a306167d785a5bcffb3011be270f39f98100389
  size 8221912272
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f84c3daca0b87ad1b5bfdf32caaa617c14a83374434c82cdeeffb6fc691a36c1
+ oid sha256:62f96618574822dfdda7524877b37f6d8fecf2c198190f3d7a77d4a50d68759b
  size 4202692840
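All four weight shards are re-uploaded at step 660000: the sizes are byte-for-byte identical (same architecture, same parameter count), and only the git-LFS `oid sha256:` pointers move. A sketch for verifying a downloaded shard against its pointer, assuming the file already sits on local disk:

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so multi-GB shards never load whole."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# New pointer for model-00001-of-00004.safetensors from this commit:
EXPECTED = "05dc186bf3ef7de2e109d0a9f5c48d5bea2f51918e1ae68284c79e25ca7b10df"
assert lfs_sha256("model-00001-of-00004.safetensors") == EXPECTED
```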
special_tokens_map.json CHANGED
@@ -12,6 +12,5 @@
     "normalized": false,
     "rstrip": false,
     "single_word": false
-   },
-   "pad_token": "<|end_of_text|>"
+   }
  }
tokenizer_config.json CHANGED
@@ -2057,6 +2057,5 @@
     "attention_mask"
   ],
   "model_max_length": 131072,
-  "pad_token": "<|end_of_text|>",
   "tokenizer_class": "PreTrainedTokenizerFast"
  }
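Together with the config.json change above, dropping `pad_token` from both special_tokens_map.json and tokenizer_config.json means the tokenizer now ships with no pad token at all. A hedged sketch of the usual consumer-side fix before batched tokenization, using the same assumed repo id as above:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Helw150/llama-8b-tootsie-0.001-19ad63")
assert tok.pad_token is None  # true after this commit

# Reuse EOS as the pad token so no new embedding row is required.
tok.pad_token = tok.eos_token  # "<|end_of_text|>", id 128001

batch = tok(["short", "a somewhat longer input"],
            padding=True, return_tensors="pt")
```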