Update app.py

app.py CHANGED
@@ -13,7 +13,7 @@ bnb_config = BitsAndBytesConfig(
 # Load model and tokenizer
 model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True, device_map="cuda", quantization_config=bnb_config)
 tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
-
+tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>ζι:\n' }}{% endif %}"  # Be careful: this model uses a custom chat template.
 
 # Define the response function
 @spaces.GPU
@@ -50,7 +50,7 @@ def respond(
 
     # Decode the generated response
     response = tokenizer.decode(generate_ids[0], skip_special_tokens=True)
-    response = response.split(f"
+    response = response.split(f"老师:\n{message}\n ζι:\n")[1]
 
     return response
 
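For context, here is a minimal sketch of what this custom template renders, using the jinja2 library directly instead of loading the tokenizer. The character's role name in the generation prompt is mojibake in this capture (it appears as "ζι"), so the sketch substitutes a hypothetical CHAR placeholder; the user role 老师 is recovered from the diff.

# Render the same Jinja2 chat template standalone to inspect the prompt layout.
# CHAR is a hypothetical stand-in for the character's (garbled) role name.
from jinja2 import Template

CHAT_TEMPLATE = (
    "{% for message in messages %}"
    "{{'<|im_start|>' + ((message['role'] + ':\\n') if message['role'] != '' else '') "
    "+ message['content'] + '<|im_end|>' + '\\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>CHAR:\\n' }}{% endif %}"
)

messages = [{"role": "老师", "content": "你好"}]
prompt = Template(CHAT_TEMPLATE).render(messages=messages, add_generation_prompt=True)
print(prompt)
# <|im_start|>老师:
# 你好<|im_end|>
# <|im_start|>CHAR:

This mirrors what tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) would return once the template is assigned.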
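The new split then relies on the model echoing the prompt: assuming <|im_start|> and <|im_end|> are registered as special tokens, skip_special_tokens=True strips them from the decoded text, leaving exactly the f-string used as the separator, followed by the reply. A small sketch with an invented reply:

# decode() returns prompt + generation with special tokens stripped, i.e.
# "老师:\n{message}\nCHAR:\n{reply}"; splitting on the echoed prompt and
# taking element [1] isolates the reply. CHAR again stands in for the
# garbled character role name, and the reply text is made up.
message = "你好"
decoded = "老师:\n你好\nCHAR:\n老师好！有什么吩咐吗？"

reply = decoded.split(f"老师:\n{message}\nCHAR:\n")[1]
print(reply)  # 老师好！有什么吩咐吗？

Note that split(...)[1] raises IndexError if the separator never occurs in the decoded text; str.partition(separator)[2] would fail soft by returning an empty string instead.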