tanyuzhou committed
Commit ab977cd · 1 Parent(s): 6f09f80

Update app.py

Files changed (1):
  1. app.py +2 -2
app.py CHANGED
@@ -21,8 +21,8 @@ quantization_config = BitsAndBytesConfig(
 )
 
 # Load model and tokenizer
-model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True, quantization_config=quantization_config)
-tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True, quantization_config=quantization_config)
 tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>星野:\n' }}{% endif %}" # Be careful that this model used custom chat template.
 
 # Define the response function