Update README.md
Browse files

README.md CHANGED

@@ -9,7 +9,9 @@ tags:
 - sft
 license: apache-2.0
 language:
-- en
+- fr
+datasets:
+- KasparZ/mtext-071024
 ---
 
 # Uploaded model
@@ -17,7 +19,41 @@ language:
 - **Developed by:** KasparZ
 - **License:** apache-2.0
 - **Finetuned from model:** unsloth/mistral-7b-v0.3-bnb-4bit
+- max_seq_length = 4096
+- tokenizer.pad_token = tokenizer.eos_token
+- model.config.pad_token_id = tokenizer.pad_token_id
+- new_tokens = ["<|s|>", "<|e|>"]
+
+- **LoRA**
+- r = 128,
+- target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj", "embed_tokens", "lm_head"]
+- lora_alpha = 32,
+- lora_dropout = 0,
+- bias = "none",
+- use_gradient_checkpointing = "unsloth",
+- random_state = 3407,
+- use_rslora = True,
+- loftq_config = None,
+
+- **Training**
+- per_device_train_batch_size = 1,
+- gradient_accumulation_steps = 8,
+- warmup_ratio = 0.1,
+- num_train_epochs = 2,
+- learning_rate = 1e-4,
+- embedding_learning_rate = 5e-5,
+- fp16 = True,
+- bf16 = False,
+- logging_steps = 1,
+- optim = "adamw_8bit",
+- weight_decay = 0.01,
+- lr_scheduler_type = "cosine",
+- seed = 3407,
+- output_dir = "outputs",
+- save_strategy = "steps",
+- save_steps = 50,
+- report_to = "none",
 
 This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
 
 [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
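The card lists hyperparameters but not the training script itself. A minimal sketch of how the setup values above plug into Unsloth's `FastLanguageModel` API; `load_in_4bit` is inferred from the base model's `-bnb-4bit` name, and the `add_tokens`/`resize_token_embeddings` route for the two marker tokens is the standard Transformers approach, assumed here rather than taken from the card:

```python
from unsloth import FastLanguageModel

max_seq_length = 4096

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/mistral-7b-v0.3-bnb-4bit",  # base model named in the card
    max_seq_length = max_seq_length,
    load_in_4bit = True,  # assumption, inferred from the 4-bit base checkpoint
)

# Pad with EOS, as in the card, so batches can be padded without a dedicated pad token.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.pad_token_id

# Register the two custom marker tokens and grow the embedding matrix to match.
new_tokens = ["<|s|>", "<|e|>"]
tokenizer.add_tokens(new_tokens)
model.resize_token_embeddings(len(tokenizer))
```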
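The **LoRA** block maps directly onto Unsloth's `get_peft_model` wrapper. Including `embed_tokens` and `lm_head` in `target_modules` is what makes the newly added tokens trainable; the comments are editorial, the values are the card's:

```python
# The LoRA configuration from the card, applied via Unsloth's PEFT wrapper.
model = FastLanguageModel.get_peft_model(
    model,
    r = 128,
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",
                      "embed_tokens", "lm_head"],  # embeddings/logits trained for the new tokens
    lora_alpha = 32,
    lora_dropout = 0,
    bias = "none",
    use_gradient_checkpointing = "unsloth",  # Unsloth's memory-saving checkpointing
    random_state = 3407,
    use_rslora = True,   # rank-stabilized LoRA: effective scale alpha/sqrt(r) ~= 2.8, not alpha/r = 0.25
    loftq_config = None,
)
```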
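The **Training** block matches Unsloth's continued-pretraining setup: the separate `embedding_learning_rate` for `embed_tokens`/`lm_head` is why `UnslothTrainer`/`UnslothTrainingArguments` (rather than a plain TRL `SFTTrainer`) is assumed below. The `train` split and the `dataset_text_field` name are placeholders the card does not specify:

```python
from datasets import load_dataset
from unsloth import UnslothTrainer, UnslothTrainingArguments

# Dataset named in the card; split and text field are assumptions.
dataset = load_dataset("KasparZ/mtext-071024", split = "train")

trainer = UnslothTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset,
    dataset_text_field = "text",          # placeholder field name
    max_seq_length = max_seq_length,
    args = UnslothTrainingArguments(
        per_device_train_batch_size = 1,
        gradient_accumulation_steps = 8,  # effective batch size 1 x 8 = 8
        warmup_ratio = 0.1,
        num_train_epochs = 2,
        learning_rate = 1e-4,
        embedding_learning_rate = 5e-5,   # lower LR for the embedding/lm_head LoRA
        fp16 = True,                      # bf16 off, e.g. for pre-Ampere GPUs
        bf16 = False,
        logging_steps = 1,
        optim = "adamw_8bit",
        weight_decay = 0.01,
        lr_scheduler_type = "cosine",
        seed = 3407,
        output_dir = "outputs",
        save_strategy = "steps",
        save_steps = 50,
        report_to = "none",
    ),
)
trainer.train()
```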