Update README.md
Browse files
README.md
CHANGED
@@ -106,6 +106,11 @@ print(generated_text)
|
|
106 |
```
|
107 |
|
108 |
# Unsloth training examples
|
|
|
|
|
|
|
|
|
|
|
109 |
|
110 |
```python
|
111 |
from unsloth import FastLanguageModel
|
@@ -118,7 +123,7 @@ load_in_4bit = True # Use 4-bit quantization to reduce memory usage
|
|
118 |
|
119 |
# Load the pre-trained model and tokenizer
|
120 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
121 |
-
model_name="
|
122 |
max_seq_length=max_seq_length,
|
123 |
dtype=dtype,
|
124 |
load_in_4bit=load_in_4bit,
|
|
|
106 |
```
|
107 |
|
108 |
# Unsloth training examples
|
109 |
+
```bash
|
110 |
+
python3 -m venv Llama-3.1-Nemotron-Nano-Train
|
111 |
+
source Llama-3.1-Nemotron-Nano-Train/bin/activate
|
112 |
+
pip install unsloth
|
113 |
+
```
|
114 |
|
115 |
```python
|
116 |
from unsloth import FastLanguageModel
|
|
|
123 |
|
124 |
# Load the pre-trained model and tokenizer
|
125 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
126 |
+
model_name="aifeifei798/Llama-3.1-Nemotron-Nano-8B-v1-bnb-4bit", # Hugging Face model ID (or a local path to the downloaded model)
|
127 |
max_seq_length=max_seq_length,
|
128 |
dtype=dtype,
|
129 |
load_in_4bit=load_in_4bit,
|