mobicham committed on
Commit
f46f560
·
verified ·
1 Parent(s): c557688

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -1
README.md CHANGED
@@ -37,7 +37,8 @@ model_id = "mobiuslabsgmbh/DeepSeek-R1-ReDistill-Qwen-1.5B-v1.0"
37
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=compute_dtype, attn_implementation="sdpa", device_map=device)
38
  tokenizer = AutoTokenizer.from_pretrained(model_id)
39
 
40
- chat = tokenizer.apply_chat_template([{"role":"user", "content":"What is 1.5+102.2?"}], tokenize=True, add_generation_prompt=True, return_tensors="pt")
 
41
  outputs = model.generate(chat.to(device), max_new_tokens=1024, do_sample=True)
42
  print(tokenizer.decode(outputs[0]))
43
  ```
 
37
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=compute_dtype, attn_implementation="sdpa", device_map=device)
38
  tokenizer = AutoTokenizer.from_pretrained(model_id)
39
 
40
+ prompt = "What is 1.5+102.2?"
41
+ chat = tokenizer.apply_chat_template([{"role":"user", "content":prompt}], tokenize=True, add_generation_prompt=True, return_tensors="pt")
42
  outputs = model.generate(chat.to(device), max_new_tokens=1024, do_sample=True)
43
  print(tokenizer.decode(outputs[0]))
44
  ```