Update handler.py
handler.py  +1 -1
@@ -45,7 +45,7 @@ class EndpointHandler:
     inputs = self.tokenizer(prompt+' \n >> <assistant>:', return_tensors="pt").to(device)
     inputs = {k: v.to('cuda') for k, v in inputs.items()}

-    output = self.inference_model.generate(input_ids=inputs["input_ids"],pad_token_id=self.tokenizer.pad_token_id, max_new_tokens=
+    output = self.inference_model.generate(input_ids=inputs["input_ids"],pad_token_id=self.tokenizer.pad_token_id, max_new_tokens=64, do_sample=True, temperature=0.9, top_p=0.9, repetition_penalty=1.5, early_stopping=True, length_penalty = -0.3, num_beams=5, num_return_sequences=1)
     response_raw = self.tokenizer.batch_decode(output.detach().cpu().numpy(), skip_special_tokens=True)
     response_ls = response_raw[0].split('>>')
     response_ = response_ls[1].split('<assistant>:')[1]