j2moreno committed on
Commit
5e884e7
1 Parent(s): 6698b86
Files changed (1) hide show
  1. app.py +13 -2
app.py CHANGED
@@ -43,10 +43,21 @@ examples=[
43
  # print("You downvoted this response: " + data.value)
44
 
45
  @spaces.GPU
46
- def generate_response(text):
47
  set_seed(SEED)
48
 
49
- tokenized_prompt = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
 
 
 
 
 
 
 
 
 
 
 
50
  # print(tokenized_prompt)
51
 
52
  output_sequences = model.generate(**tokenized_prompt, max_length=1024, num_return_sequences=1)
 
43
  # print("You downvoted this response: " + data.value)
44
 
45
  @spaces.GPU
46
+ def generate_response(text, chatbot, system_prompt="",):
47
  set_seed(SEED)
48
 
49
+ if system_prompt != "":
50
+ input_prompt = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n "
51
+ else:
52
+ input_prompt = f"<s>[INST] "
53
+
54
+ for interaction in chatbot:
55
+ input_prompt = input_prompt + str(interaction[0]) + " [/INST] " + str(interaction[1]) + " </s><s>[INST] "
56
+
57
+ input_prompt = input_prompt + str(message) + " [/INST] "
58
+ print(input_prompt)
59
+
60
+ tokenized_prompt = tokenizer(input_prompt, return_tensors="pt", padding=True, truncation=True, max_length=128)
61
  # print(tokenized_prompt)
62
 
63
  output_sequences = model.generate(**tokenized_prompt, max_length=1024, num_return_sequences=1)