papasega committed on
Commit
a6d3d6a
·
verified ·
1 Parent(s): 010c45b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -28,7 +28,7 @@ import torch
28
  # Load the finetuned model
29
  model, tokenizer = FastLanguageModel.from_pretrained(
30
  model_name = "papasega/gpt-oss-20b-mxfp4-HF4-Multilingual-Thinking", # Replace with your model name if different
31
- max_seq_length = 1024, # Set to the max_seq_length used during training
32
  dtype = None, # Use None for auto detection
33
  load_in_4bit = True, # Set to True if you saved in 4bit
34
  )
 
28
  # Load the finetuned model
29
  model, tokenizer = FastLanguageModel.from_pretrained(
30
  model_name = "papasega/gpt-oss-20b-mxfp4-HF4-Multilingual-Thinking", # Replace with your model name if different
31
+ max_seq_length = 128, # Set to the max_seq_length you want
32
  dtype = None, # Use None for auto detection
33
  load_in_4bit = True, # Set to True if you saved in 4bit
34
  )