fafiz committed on
Commit
9b11185
·
1 Parent(s): 7b1a344

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -1,10 +1,11 @@
1
  import torch
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
 
4
- model_name = 'prithivida/parrot_paraphraser_on_T5'
5
  torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
6
- tokenizer = AutoTokenizer.from_pretrained("prithivida/parrot_paraphraser_on_T5")
7
- model = AutoModelForSeq2SeqLM.from_pretrained("prithivida/parrot_paraphraser_on_T5")
 
8
  def get_response(input_text,num_return_sequences):
9
  batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=100, return_tensors="pt").to(torch_device)
10
  translated = model.generate(**batch,max_length=100,num_beams=10, num_return_sequences=num_return_sequences, temperature=0.9)
 
1
  import torch
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
 
4
# Paraphrasing model setup: load the T5 checkpoint once at module import.
# The checkpoint id lives in model_name so it is defined in exactly one place
# (previously it was also hard-coded in both from_pretrained calls).
model_name = 'ramsrigouthamg/t5_sentence_paraphraser'

# Prefer GPU when available; fall back to CPU.
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'

# AutoTokenizer / AutoModelForSeq2SeqLM are already imported at the top of the
# file; the duplicate mid-module import has been dropped.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Move the model to the same device the input batches are sent to in
# get_response(); otherwise generate() fails on CUDA machines with the
# inputs on GPU and the weights on CPU.
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(torch_device)
9
  def get_response(input_text,num_return_sequences):
10
  batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=100, return_tensors="pt").to(torch_device)
11
  translated = model.generate(**batch,max_length=100,num_beams=10, num_return_sequences=num_return_sequences, temperature=0.9)