wxgeorge committed on
Commit
4c36b18
1 Parent(s): 30bad6e

:poop: cheesy "de"chatformatization of response.

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -22,6 +22,7 @@ def respond(message, history, model):
22
  history_openai_format.append({"role": "assistant", "content":assistant})
23
  history_openai_format.append({"role": "user", "content": message})
24
 
 
25
  if model == "mattshumer/Reflection-Llama-3.1-70B":
26
  # chat/completions not working for this model;
27
  # apply chat template locally
@@ -46,7 +47,8 @@ def respond(message, history, model):
46
  # debugger_ran = True
47
  if chunk.choices[0].text is not None:
48
  partial_message = partial_message + chunk.choices[0].text
49
- yield partial_message
 
50
  else:
51
  response = client.chat.completions.create(
52
  model=model,
 
22
  history_openai_format.append({"role": "assistant", "content":assistant})
23
  history_openai_format.append({"role": "user", "content": message})
24
 
25
+
26
  if model == "mattshumer/Reflection-Llama-3.1-70B":
27
  # chat/completions not working for this model;
28
  # apply chat template locally
 
47
  # debugger_ran = True
48
  if chunk.choices[0].text is not None:
49
  partial_message = partial_message + chunk.choices[0].text
50
+ prefix_to_strip = "<|start_header_id|>assistant<|end_header_id|>\n\n"
51
+ yield partial_message[len(prefix_to_strip):]
52
  else:
53
  response = client.chat.completions.create(
54
  model=model,