DRXD1000 committed on
Commit
313d20b
1 Parent(s): e858f33

Update for Chat Template

Files changed (1)
  1. README.md +7 -6
README.md CHANGED
@@ -109,12 +109,13 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 model = AutoModelForCausalLM.from_pretrained("DRXD1000/Phoenix", torch_dtype=torch.bfloat16, device_map="auto")
 tokenizer = AutoTokenizer.from_pretrained("DRXD1000/Phoenix")
-prompt = """<|system|>
-</s>
-<|user|>
-Erkläre mir was KI ist.</s>
-<|assistant|>
-"""
+prompt = [
+    {
+        "role": "system",
+        "content": "",  # Not recommended: Phoenix does not respond well to system prompts
+    },
+    {"role": "user", "content": "Erkläre mir was KI ist"},
+]
 inputs = tokenizer.apply_chat_template(prompt, return_tensors="pt").to("cuda")
 outputs = model.generate(inputs, num_return_sequences=1, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
 response = tokenizer.decode(outputs[0], skip_special_tokens=True)
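
The change hands prompt formatting over to the tokenizer's chat template instead of hand-building the `<|system|>`/`<|user|>` string. A minimal sketch (not part of the commit) of inspecting what the template renders, assuming the tokenizer ships the Zephyr-style template implied by the old hand-written prompt; `tokenize=False` and `add_generation_prompt=True` are standard `apply_chat_template` parameters:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("DRXD1000/Phoenix")
messages = [{"role": "user", "content": "Erkläre mir was KI ist"}]

# tokenize=False returns the rendered prompt string instead of token ids;
# add_generation_prompt=True appends the assistant turn marker so the
# model continues as the assistant rather than extending the user message.
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# Expected to resemble the old hand-written format, e.g.:
# <|user|>
# Erkläre mir was KI ist</s>
# <|assistant|>
print(text)
```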