Update README.md
README.md CHANGED
@@ -28,7 +28,7 @@ import torch
 # Load the finetuned model
 model, tokenizer = FastLanguageModel.from_pretrained(
     model_name = "papasega/gpt-oss-20b-mxfp4-HF4-Multilingual-Thinking", # Replace with your model name if different
-    max_seq_length =
+    max_seq_length = 128, # Set to the max_seq_length you want
     dtype = None, # Use None for auto detection
     load_in_4bit = True, # Set to True if you saved in 4bit
 )
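For context, after this change the loading snippet in the README reads roughly as follows. This is a sketch assuming Unsloth's `FastLanguageModel` API and the `import torch` setup visible in the hunk header; the inference calls at the end are illustrative additions, not part of the diff.

```python
from unsloth import FastLanguageModel

# Load the finetuned model (values mirror the updated README hunk)
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "papasega/gpt-oss-20b-mxfp4-HF4-Multilingual-Thinking",  # Replace with your model name if different
    max_seq_length = 128,   # Set to the max_seq_length you want
    dtype = None,           # Use None for auto detection
    load_in_4bit = True,    # Set to True if you saved in 4bit
)

# Illustrative usage (assumed, not from the README): switch to inference mode and generate
FastLanguageModel.for_inference(model)
inputs = tokenizer("Bonjour, comment ça va ?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```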