Update app.py
app.py CHANGED
@@ -1,11 +1,11 @@
 import torch
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
-model_name = '
+model_name = 'ibm/qcpg-sentences'
 torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForSeq2SeqLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("ibm/qcpg-sentences")
+model = AutoModelForSeq2SeqLM.from_pretrained("ibm/qcpg-sentences")
 def get_response(input_text,num_return_sequences):
     batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=100, return_tensors="pt").to(torch_device)
     translated = model.generate(**batch,max_length=100,num_beams=10, num_return_sequences=num_return_sequences, temperature=0.9)
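
For reference, a minimal runnable sketch of the updated app.py, under a few assumptions: the tokenizer and the model both load ibm/qcpg-sentences (matching model_name in the diff); the duplicate transformers import is dropped; the model is moved to torch_device, since the committed code sends only the batch to the GPU; and the deprecated prepare_seq2seq_batch is replaced with a direct tokenizer call. The decode-and-return step is an assumption, since the visible hunk ends at generate().

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = 'ibm/qcpg-sentences'
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load once at startup; the model is placed on the same device as the inputs.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(torch_device)

def get_response(input_text, num_return_sequences):
    # prepare_seq2seq_batch is deprecated in recent transformers releases;
    # calling the tokenizer directly builds the same padded, truncated batch.
    batch = tokenizer([input_text], truncation=True, padding='longest',
                      max_length=100, return_tensors='pt').to(torch_device)
    # Beam search over 10 beams. temperature is dropped here: it only takes
    # effect when do_sample=True, so it was a no-op in the original call.
    translated = model.generate(**batch, max_length=100, num_beams=10,
                                num_return_sequences=num_return_sequences)
    # Assumed return step: the visible hunk stops at generate().
    return tokenizer.batch_decode(translated, skip_special_tokens=True)

A hypothetical call such as get_response('The weather is nice today.', 3) would then return three candidate paraphrases as plain strings.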