mobicham committed
Commit f94fcda · verified · 1 parent: 053fbba

Update README.md

Files changed (1): README.md (+2 -1)
README.md CHANGED
@@ -38,7 +38,8 @@ model_id = "mobiuslabsgmbh/DeepSeek-R1-ReDistill-Qwen-7B-v1.1"
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=compute_dtype, attn_implementation="sdpa", device_map=device)
  tokenizer = AutoTokenizer.from_pretrained(model_id)
 
- chat = tokenizer.apply_chat_template([{"role":"user", "content":"What is 1.5+102.2?"}], tokenize=True, add_generation_prompt=True, return_tensors="pt")
+ prompt = "What is 1.5+102.2?"
+ chat = tokenizer.apply_chat_template([{"role":"user", "content":prompt}], tokenize=True, add_generation_prompt=True, return_tensors="pt")
  outputs = model.generate(chat.to(device), max_new_tokens=1024, do_sample=True)
  print(tokenizer.decode(outputs[0]))
  ```
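
For context, here is a minimal sketch of how the full README example reads after this commit. The hunk does not show where `compute_dtype` and `device` are defined earlier in the README, so `torch.bfloat16` and `"cuda"` are used below as assumed stand-ins; the rest follows the lines visible in the diff.

```python
# Sketch of the updated README snippet after this commit.
# ASSUMPTIONS: compute_dtype and device are defined earlier in the README;
# torch.bfloat16 and "cuda" are placeholders used here for a runnable example.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mobiuslabsgmbh/DeepSeek-R1-ReDistill-Qwen-7B-v1.1"
compute_dtype = torch.bfloat16  # assumed stand-in
device = "cuda"                 # assumed stand-in

model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=compute_dtype, attn_implementation="sdpa", device_map=device
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# The commit factors the question out into its own `prompt` variable
# before building the chat template input.
prompt = "What is 1.5+102.2?"
chat = tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}],
    tokenize=True, add_generation_prompt=True, return_tensors="pt",
)

outputs = model.generate(chat.to(device), max_new_tokens=1024, do_sample=True)
print(tokenizer.decode(outputs[0]))
```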