Update app.py
app.py CHANGED
@@ -27,7 +27,7 @@ def inference(context, instruction, num_beams:int=4):
     if use_cuda:
         for t in input_tokens:
             if torch.is_tensor(input_tokens[t]):
-                input_tokens[t] = input_tokens[t].to(
+                input_tokens[t] = input_tokens[t].to("cuda")

     output = model.generate(
         input_tokens['input_ids'],
@@ -42,7 +42,7 @@ def inference(context, instruction, num_beams:int=4):
     output_tokens = output.sequences
     generated_tokens = output_tokens[:, num_input_tokens:]
     num_generated_tokens = (generated_tokens != tokenizer.pad_token_id).sum(dim=-1).tolist()[0]
-    prefix_to_add = torch.tensor([[tokenizer("A")["input_ids"][0]]]).to(
+    prefix_to_add = torch.tensor([[tokenizer("A")["input_ids"][0]]]).to("cuda")
     generated_tokens = torch.cat([prefix_to_add, generated_tokens], dim=1)
     generated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
     string_output = [i[1:].strip() for i in generated_text][0]
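Both hunks are the same fix: tensors created on the CPU (the tokenized inputs in the first hunk, the decode-prefix tensor in the second) are now moved onto the GPU with a hardcoded "cuda" device string, so they sit on the same device as the model before generate() and torch.cat() touch them. The second hunk also relies on a decoding trick: instead of decoding the generated suffix directly, it prepends the first token of tokenizer("A") and later slices the decoded "A" off with i[1:], which preserves the leading-whitespace behavior of BPE/SentencePiece decoding when only part of a sequence is decoded.

The sketch below reproduces the pattern end to end. It is a minimal illustration, not this Space's actual app.py: the model name ("gpt2"), prompt, and generation settings are stand-ins, and the Space's generate() call evidently passes return_dict_in_generate=True (hence output.sequences), which the sketch omits.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Stand-in model for illustration; not the model this Space actually serves.
model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

use_cuda = torch.cuda.is_available()
if use_cuda:
    model = model.to("cuda")

input_tokens = tokenizer("The capital of France is", return_tensors="pt")
if use_cuda:
    # First hunk: every input tensor must be on the same device as the
    # model before generate() is called.
    for t in input_tokens:
        if torch.is_tensor(input_tokens[t]):
            input_tokens[t] = input_tokens[t].to("cuda")

output = model.generate(
    input_tokens["input_ids"],
    max_new_tokens=16,
    num_beams=4,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token
)

# Second hunk: decode only the freshly generated suffix. Prepending the
# token for "A" and slicing it off after decoding preserves the leading
# whitespace the decoder attaches to the first generated token.
num_input_tokens = input_tokens["input_ids"].shape[-1]
generated_tokens = output[:, num_input_tokens:]
prefix_to_add = torch.tensor([[tokenizer("A")["input_ids"][0]]])
if use_cuda:
    # The prefix tensor must live on the same device as generated_tokens,
    # which is what the second hunk's .to("cuda") enforces.
    prefix_to_add = prefix_to_add.to("cuda")
generated_tokens = torch.cat([prefix_to_add, generated_tokens], dim=1)
generated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
string_output = [i[1:].strip() for i in generated_text][0]
print(string_output)

Hardcoding "cuda" is fine for a GPU-only Space; a more portable variant would use model.device as the target so the same code also runs on CPU.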