QnAD2_gemma7b / config.json
{
  "_name_or_path": "/content/G15_QnA_finetuned_gemma/adapter_config.json",
  "alpha_pattern": {},
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_mapping": null,
  "base_model_name_or_path": "unsloth/gemma-7b-bnb-4bit",
  "bias": "none",
  "bos_token_id": 2,
  "eos_token_id": 1,
  "fan_in_fan_out": false,
  "head_dim": 256,
  "hidden_act": "gelu_pytorch_tanh",
  "hidden_activation": null,
  "hidden_size": 3072,
  "inference_mode": true,
  "init_lora_weights": true,
  "initializer_range": 0.02,
  "intermediate_size": 24576,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 16,
  "lora_dropout": 0,
  "max_position_embeddings": 8192,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "model_type": "gemma",
  "modules_to_save": null,
  "num_attention_heads": 16,
  "num_hidden_layers": 28,
  "num_key_value_heads": 16,
  "pad_token_id": 0,
  "peft_type": "LORA",
  "r": 16,
  "rank_pattern": {},
  "revision": null,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "target_modules": [
    "o_proj",
    "q_proj",
    "gate_proj",
    "v_proj",
    "up_proj",
    "k_proj",
    "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "transformers_version": "4.44.2",
  "use_cache": true,
  "use_dora": false,
  "use_rslora": false,
  "vocab_size": 256000
}
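
This config combines the Gemma-7B base-model hyperparameters (hidden_size 3072, 28 layers, 256000-token vocabulary) with the PEFT LoRA adapter settings used for fine-tuning on top of unsloth/gemma-7b-bnb-4bit: rank 16, lora_alpha 16, and all attention and MLP projections listed in target_modules. Below is a minimal loading sketch under stated assumptions: the repo id "Lohith9459/QnAD2_gemma7b" and the presence of adapter weights alongside this config are inferred from the page, not confirmed by the file itself.

# Minimal sketch, not the author's published usage. Assumes the LoRA adapter
# weights are stored in this repo and that the repo id is
# "Lohith9459/QnAD2_gemma7b" (both assumptions). Requires transformers, peft,
# accelerate, and bitsandbytes.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/gemma-7b-bnb-4bit"    # from "base_model_name_or_path"
adapter_id = "Lohith9459/QnAD2_gemma7b"  # assumed repo id for this adapter

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id,
    device_map="auto",  # places the 4-bit base model on available GPUs
)

# Attach the r=16, lora_alpha=16 adapter described by this config.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

prompt = "Question: What is LoRA?\nAnswer:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))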