PhantHive committed on
Commit
8ef6e3f
1 Parent(s): c0b7aa4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -25,9 +25,7 @@ model = PeftModel.from_pretrained(model, model_id)
25
  def greet(text):
26
  with torch.no_grad():
27
  # Include EOS token for better context
28
- input_text = "<s>### Instruction:\nYou are a data analyst tasked with helping students finding resources, respond in JSON format.\n\n" +
29
- f"### Input:\n{text}\n\n" +
30
- "### Response:\n"
31
 
32
  batch = tokenizer(input_text, return_tensors='pt', add_special_tokens=True).to(device)
33
 
@@ -42,8 +40,8 @@ def greet(text):
42
  # Decode only the generated tokens
43
  response = tokenizer.decode(output_tokens[0][len(batch['input_ids'][0]):], skip_special_tokens=True)
44
 
45
- # Additional stopping condition at next "\n### Response:"
46
- response_parts = response.split("\n### Response:")
47
  return response_parts[0] # Return only the first part
48
 
49
  iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
 
25
  def greet(text):
26
  with torch.no_grad():
27
  # Include EOS token for better context
28
+ input_text = "<s>### User:\n{text}\n\n### Assistant:\n"
 
 
29
 
30
  batch = tokenizer(input_text, return_tensors='pt', add_special_tokens=True).to(device)
31
 
 
40
  # Decode only the generated tokens
41
  response = tokenizer.decode(output_tokens[0][len(batch['input_ids'][0]):], skip_special_tokens=True)
42
 
43
+ # Additional stopping condition at next "### Response:"
44
+ response_parts = response.split("### Assistant:")
45
  return response_parts[0] # Return only the first part
46
 
47
  iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")