run480 committed
Commit 7a2caab · verified · 1 Parent(s): e4a6e36

Update app.py

Parameter tuning of Pegasus model.

Files changed (1):
  1. app.py +23 -2
app.py CHANGED
@@ -78,6 +78,27 @@
  # Because abstractive summarization involves paraphrasing words, it is also more time-consuming;
  # however, it has the potential to produce a more polished and coherent summary.

+ # from transformers import PegasusForConditionalGeneration, PegasusTokenizer
+ # import gradio as grad
+ #
+ # mdl_name = "google/pegasus-xsum"
+ # pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
+ # mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)
+ #
+ # def summarize(text):
+ #     tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
+ #     txt_summary = mdl.generate(**tokens)
+ #     response = pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)
+ #     return response
+ #
+ # txt = grad.Textbox(lines=10, label="English", placeholder="English Text here")
+ # out = grad.Textbox(lines=10, label="Summary")
+ #
+ # grad.Interface(summarize, inputs=txt, outputs=out).launch()
+ #
+ #------------------------------------------------------------------------------------------
+ # 6. Same model with some tuning with some parameters: num_return_sequences=5, max_length=200, temperature=1.5, num_beams=10
+
  from transformers import PegasusForConditionalGeneration, PegasusTokenizer
  import gradio as grad

@@ -87,8 +108,8 @@ mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)

  def summarize(text):
      tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
-     txt_summary = mdl.generate(**tokens)
-     response = pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)
+     translated_txt = mdl.generate(**tokens, num_return_sequences=5, max_length=200, temperature=1.5, num_beams=10)
+     response = pegasus_tkn.batch_decode(translated_txt, skip_special_tokens=True)
      return response

  txt = grad.Textbox(lines=10, label="English", placeholder="English Text here")
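
For reference, below is a minimal, self-contained sketch of the Pegasus section of app.py as it stands after this commit, reassembled from the diff above. The model name, tokenizer setup, and Gradio wiring (txt, out, the Interface launch) are taken from the commented-out block in the diff rather than from the full file, so the surrounding code in the actual app.py may differ slightly.

from transformers import PegasusForConditionalGeneration, PegasusTokenizer
import gradio as grad

# Model name as given in the commented-out block of the diff.
mdl_name = "google/pegasus-xsum"
pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)

def summarize(text):
    tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
    # num_beams=10 turns on beam search; num_return_sequences=5 keeps the five
    # highest-scoring beams, each capped at max_length=200 tokens. temperature=1.5
    # is passed along as well, although it only changes the output when sampling is enabled.
    translated_txt = mdl.generate(
        **tokens,
        num_return_sequences=5,
        max_length=200,
        temperature=1.5,
        num_beams=10,
    )
    # Decode every returned beam into a plain-text candidate summary.
    response = pegasus_tkn.batch_decode(translated_txt, skip_special_tokens=True)
    return response

txt = grad.Textbox(lines=10, label="English", placeholder="English Text here")
out = grad.Textbox(lines=10, label="Summary")
grad.Interface(summarize, inputs=txt, outputs=out).launch()

Note that with num_return_sequences=5 the function returns a list of five candidate summaries rather than a single string; that list is what gets displayed in the single Summary output box, matching the behavior of the committed code.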