Technozam committed on
Commit
c8f326c
1 Parent(s): b9948f6

Upload app.py

Files changed (1)
  1. app.py +533 -0
app.py ADDED
# -*- coding: utf-8 -*-
"""McqGenerator.ipynb

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1_1AYqOfr649dp8QiMuV3GPiNH_bMdpUA

## Installation of libraries

## Example 1
"""

from textwrap3 import wrap

text = """Elon Musk has shown again he can influence the digital currency market with just his tweets. After saying that his electric vehicle-making company
Tesla will not accept payments in Bitcoin because of environmental concerns, he tweeted that he was working with developers of Dogecoin to improve
system transaction efficiency. Following the two distinct statements from him, the world's largest cryptocurrency hit a two-month low, while Dogecoin
rallied by about 20 percent. The SpaceX CEO has in recent months often tweeted in support of Dogecoin, but rarely for Bitcoin. In a recent tweet,
Musk put out a statement from Tesla that it was “concerned” about the rapidly increasing use of fossil fuels for Bitcoin (price in India) mining and
transaction, and hence was suspending vehicle purchases using the cryptocurrency. A day later he again tweeted saying, “To be clear, I strongly
believe in crypto, but it can't drive a massive increase in fossil fuel use, especially coal”. It triggered a downward spiral for Bitcoin value but
the cryptocurrency has stabilised since. A number of Twitter users welcomed Musk's statement. One of them said it's time people started realising
that Dogecoin “is here to stay” and another referred to Musk's previous assertion that crypto could become the world's future currency."""

# Print the sample text wrapped to 150 characters per line.
for wrp in wrap(text, 150):
    print(wrp)
print("\n")
"""## Example 2"""

import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Load the pretrained T5 model and tokenizer used for summarization.
summary_model = T5ForConditionalGeneration.from_pretrained('t5-base')
summary_tokenizer = T5Tokenizer.from_pretrained('t5-base')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
summary_model = summary_model.to(device)

import random
import numpy as np

def set_seed(seed: int):
    # Seed all RNGs (Python, NumPy, PyTorch CPU/GPU) for reproducible generation.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

set_seed(42)
import nltk
# punkt and wordnet are required by sent_tokenize and by the WordNet
# distractor lookup below; download them up front.
nltk.download('punkt')
# nltk.download('brown')
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
from nltk.tokenize import sent_tokenize

def postprocesstext(content):
    # Capitalize each sentence of the generated summary.
    final = ""
    for sent in sent_tokenize(content):
        sent = sent.capitalize()
        final = final + " " + sent
    return final
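# Minimal sketch of what postprocesstext does (illustrative, not executed):
#
#   postprocesstext("elon musk tweeted. bitcoin fell.")
#   -> " Elon musk tweeted. Bitcoin fell."
#
# Note that str.capitalize() also lowercases the rest of the sentence, so
# proper nouns after the first word (e.g. "musk" above) lose their casing.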
def summarizer(text, model, tokenizer):
    text = text.strip().replace("\n", " ")
    text = "summarize: " + text        # T5 task prefix for summarization
    # print (text)
    max_len = 512
    encoding = tokenizer.encode_plus(text, max_length=max_len, pad_to_max_length=False, truncation=True, return_tensors="pt").to(device)

    input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]

    outs = model.generate(input_ids=input_ids,
                          attention_mask=attention_mask,
                          early_stopping=True,
                          num_beams=3,
                          num_return_sequences=1,
                          no_repeat_ngram_size=2,
                          min_length=75,
                          max_length=300)

    dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
    summary = dec[0]
    summary = postprocesstext(summary)
    summary = summary.strip()

    return summary


summarized_text = summarizer(text, summary_model, summary_tokenizer)


print("\nOriginal Text >>")
for wrp in wrap(text, 150):
    print(wrp)
print("\n")
print("Summarized Text >>")
for wrp in wrap(summarized_text, 150):
    print(wrp)
print("\n")
"""# **Answer Span Extraction (Keywords and Noun Phrases)**"""

total = 10

import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import string
import pke
import traceback

def get_nouns_multipartite(content):
    out = []
    try:
        extractor = pke.unsupervised.MultipartiteRank()
        extractor.load_document(input=content, language='en')
        # Only proper nouns and nouns are kept as keyphrase candidates;
        # punctuation marks and stopwords are excluded.
        pos = {'PROPN', 'NOUN'}
        stoplist = list(string.punctuation)
        stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']
        stoplist += stopwords.words('english')
        # extractor.candidate_selection(pos=pos, stoplist=stoplist)
        extractor.candidate_selection(pos=pos)
        # Build the Multipartite graph and rank candidates using random walk;
        # alpha controls the weight adjustment mechanism, see TopicRank for
        # threshold/method parameters.
        extractor.candidate_weighting(alpha=1.1,
                                      threshold=0.75,
                                      method='average')
        keyphrases = extractor.get_n_best(n=15)

        for val in keyphrases:
            out.append(val[0])
    except Exception:
        out = []
        traceback.print_exc()

    return out
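# Hypothetical sketch of the return shape (the exact phrases depend on the
# input text and the pke version):
#
#   get_nouns_multipartite(text)
#   -> ['musk', 'bitcoin', 'dogecoin', 'tesla', ...]   # up to 15 phrases, best-ranked first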
from flashtext import KeywordProcessor


def get_keywords(originaltext, summarytext, total):
    # Rank keyphrases on the full text, then keep only those that also
    # survive in the summary, preserving the original ranking order.
    keywords = get_nouns_multipartite(originaltext)
    print("keywords unsummarized: ", keywords)
    keyword_processor = KeywordProcessor()
    for keyword in keywords:
        keyword_processor.add_keyword(keyword)

    keywords_found = keyword_processor.extract_keywords(summarytext)
    keywords_found = list(set(keywords_found))
    print("keywords_found in summarized: ", keywords_found)

    important_keywords = []
    for keyword in keywords:
        if keyword in keywords_found:
            important_keywords.append(keyword)

    return important_keywords[:total]


imp_keywords = get_keywords(text, summarized_text, total)
print(imp_keywords)
"""# **Question generation with T5**"""

question_model = T5ForConditionalGeneration.from_pretrained('ramsrigouthamg/t5_squad_v1')
question_tokenizer = T5Tokenizer.from_pretrained('ramsrigouthamg/t5_squad_v1')
question_model = question_model.to(device)

def get_question(context, answer, model, tokenizer):
    # The SQuAD-finetuned T5 expects a "context: ... answer: ..." prompt and
    # generates the corresponding question.
    text = "context: {} answer: {}".format(context, answer)
    encoding = tokenizer.encode_plus(text, max_length=384, pad_to_max_length=False, truncation=True, return_tensors="pt").to(device)
    input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]

    outs = model.generate(input_ids=input_ids,
                          attention_mask=attention_mask,
                          early_stopping=True,
                          num_beams=5,
                          num_return_sequences=1,
                          no_repeat_ngram_size=2,
                          max_length=72)

    dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]

    Question = dec[0].replace("question:", "")
    Question = Question.strip()
    return Question


for wrp in wrap(summarized_text, 150):
    print(wrp)
print("\n")

for answer in imp_keywords:
    ques = get_question(summarized_text, answer, question_model, question_tokenizer)
    print(ques)
    print(answer.capitalize())
    print("\n")
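# Illustrative output of the loop above for the sample text; this exact
# question/answer pair is reused as the demo input further below:
#
#   What cryptocurrency did Musk rarely tweet about?
#   Bitcoin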
"""# **Distractor Generation (Sense2Vec, MMR and WordNet)**"""

from sense2vec import Sense2Vec
s2v = Sense2Vec().from_disk('../s2v_old')

from sentence_transformers import SentenceTransformer
# paraphrase-distilroberta-base-v1
sentence_transformer_model = SentenceTransformer('msmarco-distilbert-base-v3')

from similarity.normalized_levenshtein import NormalizedLevenshtein
normalized_levenshtein = NormalizedLevenshtein()

def filter_same_sense_words(original, wordlist):
    # Keep only sense2vec neighbours whose sense tag (e.g. NOUN, PERSON)
    # matches the sense of the original word.
    filtered_words = []
    base_sense = original.split('|')[1]
    print(base_sense)
    for eachword in wordlist:
        if eachword[0].split('|')[1] == base_sense:
            filtered_words.append(eachword[0].split('|')[0].replace("_", " ").title().strip())
    return filtered_words

def get_highest_similarity_score(wordlist, wrd):
    # Highest normalized-Levenshtein similarity between wrd and any word
    # already kept; used to reject near-duplicate distractors.
    score = []
    for each in wordlist:
        score.append(normalized_levenshtein.similarity(each.lower(), wrd.lower()))
    return max(score)

def sense2vec_get_words(word, s2v, topn, question):
    output = []
    print("word ", word)
    try:
        sense = s2v.get_best_sense(word, senses=["NOUN", "PERSON", "PRODUCT", "LOC", "ORG", "EVENT", "NORP", "WORK OF ART", "FAC", "GPE", "NUM", "FACILITY"])
        most_similar = s2v.most_similar(sense, n=topn)
        # print (most_similar)
        output = filter_same_sense_words(sense, most_similar)
        print("Similar ", output)
    except Exception:
        output = []

    # Drop candidates that are too close to ones already kept, or that
    # already appear in the question itself.
    threshold = 0.6
    final = [word]
    checklist = question.split()
    for x in output:
        if get_highest_similarity_score(final, x) < threshold and x not in final and x not in checklist:
            final.append(x)

    return final[1:]
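# Illustrative (hypothetical) call for the running example; real output
# depends on the s2v_old vectors on disk:
#
#   sense2vec_get_words("Bitcoin", s2v, 40,
#                       "What cryptocurrency did Musk rarely tweet about?")
#   -> ['Ethereum', 'Litecoin', 'Dogecoin', ...]   # same-sense neighbours,
#                                                  # near-duplicates dropped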
from sklearn.metrics.pairwise import cosine_similarity

def mmr(doc_embedding, word_embeddings, words, top_n, lambda_param):
    # Maximal Marginal Relevance: greedily pick candidates that are relevant
    # to the document while penalizing similarity to already-picked ones.

    # Extract similarity within words, and between words and the document
    word_doc_similarity = cosine_similarity(word_embeddings, doc_embedding)
    word_similarity = cosine_similarity(word_embeddings)

    # Initialize candidates and already choose the best keyword/keyphrase
    keywords_idx = [np.argmax(word_doc_similarity)]
    candidates_idx = [i for i in range(len(words)) if i != keywords_idx[0]]

    for _ in range(top_n - 1):
        # Extract similarities within candidates and
        # between candidates and selected keywords/phrases
        candidate_similarities = word_doc_similarity[candidates_idx, :]
        target_similarities = np.max(word_similarity[candidates_idx][:, keywords_idx], axis=1)

        # Calculate MMR
        mmr = (lambda_param) * candidate_similarities - (1 - lambda_param) * target_similarities.reshape(-1, 1)
        mmr_idx = candidates_idx[np.argmax(mmr)]

        # Update keywords & candidates
        keywords_idx.append(mmr_idx)
        candidates_idx.remove(mmr_idx)

    return [words[idx] for idx in keywords_idx]
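# Self-contained sketch of how mmr trades relevance against diversity
# (made-up embeddings, kept commented out so it does not run in the app).
# With lambda_param near 1, MMR favours relevance to the document; near 0,
# it favours diversity among the already-picked words.
#
#   doc = np.array([[1.0, 0.0]])        # document embedding
#   cand = np.array([[0.9, 0.1],        # very relevant
#                    [0.8, 0.2],        # relevant but redundant with the 1st
#                    [0.1, 0.9]])       # less relevant, diverse
#   mmr(doc, cand, ["w1", "w2", "w3"], top_n=2, lambda_param=0.3)
#   -> ['w1', 'w3']                     # the diverse candidate beats the redundant one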
def get_distractors_wordnet(word):
    # Fallback distractors from WordNet: siblings of the word under its
    # first hypernym (co-hyponyms).
    distractors = []
    try:
        syn = wn.synsets(word, 'n')[0]

        word = word.lower()
        orig_word = word
        if len(word.split()) > 1:
            word = word.replace(" ", "_")
        hypernym = syn.hypernyms()
        if len(hypernym) == 0:
            return distractors
        for item in hypernym[0].hyponyms():
            name = item.lemmas()[0].name()
            # print ("name ",name, " word",orig_word)
            if name == orig_word:
                continue
            name = name.replace("_", " ")
            name = " ".join(w.capitalize() for w in name.split())
            if name is not None and name not in distractors:
                distractors.append(name)
    except Exception:
        print("Wordnet distractors not found")
    return distractors
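# Illustrative (hypothetical) call; the exact list depends on the installed
# WordNet data:
#
#   get_distractors_wordnet("lion")
#   -> ['Tiger', 'Leopard', 'Jaguar', ...]   # co-hyponyms under the first hypernym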
314
+ distractors = sense2vec_get_words(word,sense2vecmodel,top_n,origsentence)
315
+ print ("distractors ",distractors)
316
+ if len(distractors) ==0:
317
+ return distractors
318
+ distractors_new = [word.capitalize()]
319
+ distractors_new.extend(distractors)
320
+ # print ("distractors_new .. ",distractors_new)
321
+
322
+ embedding_sentence = origsentence+ " "+word.capitalize()
323
+ # embedding_sentence = word
324
+ keyword_embedding = sentencemodel.encode([embedding_sentence])
325
+ distractor_embeddings = sentencemodel.encode(distractors_new)
326
+
327
+ # filtered_keywords = mmr(keyword_embedding, distractor_embeddings,distractors,4,0.7)
328
+ max_keywords = min(len(distractors_new),5)
329
+ filtered_keywords = mmr(keyword_embedding, distractor_embeddings,distractors_new,max_keywords,lambdaval)
330
+ # filtered_keywords = filtered_keywords[1:]
331
+ final = [word.capitalize()]
332
+ for wrd in filtered_keywords:
333
+ if wrd.lower() !=word.lower():
334
+ final.append(wrd.capitalize())
335
+ final = final[1:]
336
+ return final
337
+
338
+ sent = "What cryptocurrency did Musk rarely tweet about?"
339
+ keyword = "Bitcoin"
340
+
341
+ # sent = "What did Musk say he was working with to improve system transaction efficiency?"
342
+ # keyword= "Dogecoin"
343
+
344
+
345
+ # sent = "What company did Musk say would not accept bitcoin payments?"
346
+ # keyword= "Tesla"
347
+
348
+
349
+ # sent = "What has Musk often tweeted in support of?"
350
+ # keyword = "Cryptocurrency"
351
+
352
+ print (get_distractors(keyword,sent,s2v,sentence_transformer_model,40,0.2))
353
+
"""# **Gradio Visualization with MCQs**"""

import mysql.connector
import datetime

# NOTE: database credentials are hard-coded here; the connection is only
# used by the (commented-out) logging code further below.
mydb = mysql.connector.connect(
    host="qtechdb-1.cexugk1h8rui.ap-northeast-1.rds.amazonaws.com",
    user="admin",
    password="F3v2vGWzb8vaniE3nqzi",
    database="spring_social"
)
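# A safer pattern is to load the credentials from the environment rather than
# the source file; minimal sketch (commented out, assumes the DB_* variables
# are set before launch):
#
#   import os
#   mydb = mysql.connector.connect(
#       host=os.environ["DB_HOST"],
#       user=os.environ["DB_USER"],
#       password=os.environ["DB_PASSWORD"],
#       database=os.environ["DB_NAME"],
#   )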
import gradio as gr
import re


context = gr.Textbox(lines=10, placeholder="Enter paragraph/content here...", label="Enter your content (input must be more than 150 words).")
total = gr.Slider(1, 10, value=1, step=1, label="Total Number Of Questions")
subject = gr.Textbox(placeholder="Enter subject/title here...", label="Enter your title (title must contain at least 1 word).")

output = gr.HTML(label="Question and Answers")


def generate_question_text(context, subject, total):

    words_text = len(re.findall(r'\w+', context))
    words_subject = len(re.findall(r'\w+', subject))

    if words_text < 150:
        raise gr.Error("Invalid input (content must be more than 150 words).")
        # print("Number of words:", words)

    elif words_subject < 1:
        raise gr.Error("Invalid input (title must contain at least one word).")

    else:
        summary_text = summarizer(context, summary_model, summary_tokenizer)
        for wrp in wrap(summary_text, 150):
            print(wrp)
        # noun_phrases = getnounphrases(summary_text, sentence_transformer_model, 3)
        noun_phrases = get_keywords(context, summary_text, total)
        print("\n\nNoun phrases", noun_phrases)

        output = "<b style='color:black;'>Select the correct answer.</b><br><br>"
        i = 1
        for answer in noun_phrases:
            ques = get_question(summary_text, answer, question_model, question_tokenizer)
            distractors = get_distractors(answer.capitalize(), ques, s2v, sentence_transformer_model, 40, 0.2)
            # output = output + ques + "\n" + "Ans: " + answer.capitalize() + "\n\n"
            output = output + "<b style='color:black;'>Q" + str(i) + ") " + ques + "</b>"
            # output = output + "<br>"
            i += 1
            output = output + "<br><b> ▪ " + answer.capitalize() + "</b>"
            if len(distractors) > 0:
                for distractor in distractors[:3]:
                    output = output + "<p> ▪ " + distractor + "</p>"
            output = output + "<br>"

        output = output + "<b style='color:black;'>" + "Correct Answer Key</b><br>"

        for answer in noun_phrases:
            output = output + "<b style='color:green;'> ▪ " + answer.capitalize() + "</b>"
            output = output + "<br>"

        # mycursor = mydb.cursor()
        # timedate = datetime.datetime.now()

        # sql = "INSERT INTO mcqstexts (subject, input, output, timedate) VALUES (%s, %s, %s, %s)"
        # val = (subject, context, output, timedate)
        # mycursor.execute(sql, val)

        # mydb.commit()

        # print(mycursor.rowcount, "record inserted.")

        return output


iface = gr.Interface(
    fn=generate_question_text,
    inputs=[context, subject, total],
    outputs=output,
    allow_flagging="never", flagging_options=["Save Data"])
# iface.launch(debug=True, share=True)

def generate_question(context, subject, total):
    # Same rendering as generate_question_text, but without the input
    # validation; used by the file-upload tab below.
    summary_text = summarizer(context, summary_model, summary_tokenizer)
    for wrp in wrap(summary_text, 150):
        print(wrp)
    # noun_phrases = getnounphrases(summary_text, sentence_transformer_model, 3)
    noun_phrases = get_keywords(context, summary_text, total)
    print("\n\nNoun phrases", noun_phrases)

    output = "<b style='color:black;'>Select the correct answer.</b><br><br>"
    i = 1
    for answer in noun_phrases:
        ques = get_question(summary_text, answer, question_model, question_tokenizer)
        distractors = get_distractors(answer.capitalize(), ques, s2v, sentence_transformer_model, 40, 0.2)
        # output = output + ques + "\n" + "Ans: " + answer.capitalize() + "\n\n"
        output = output + "<b style='color:black;'>Q" + str(i) + ") " + ques + "</b>"
        # output = output + "<br>"
        i += 1
        output = output + "<br><b> ▪ " + answer.capitalize() + "</b>"
        if len(distractors) > 0:
            for distractor in distractors[:3]:
                output = output + "<p> ▪ " + distractor + "</p>"
        output = output + "<br>"

    output = output + "<b style='color:black;'>" + "Correct Answer Key</b><br>"

    for answer in noun_phrases:
        output = output + "<b style='color:green;'> ▪ " + answer.capitalize() + "</b>"
        output = output + "<br>"

    return output
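# generate_question_text and generate_question share the entire rendering
# loop; a possible refactor (sketch only, render_mcqs and validate_inputs are
# hypothetical helpers not defined in this file) would factor it out:
#
#   def generate_question_text(context, subject, total):
#       validate_inputs(context, subject)   # raises gr.Error on bad input
#       summary_text = summarizer(context, summary_model, summary_tokenizer)
#       return render_mcqs(summary_text, get_keywords(context, summary_text, total))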
import pandas as pd

file = None

def filecreate(x, subject, total):

    with open(x.name) as fo:
        text = fo.read()
    # print(text)

    words_text = len(re.findall(r'\w+', text))
    words_subject = len(re.findall(r'\w+', subject))

    if words_text < 150:
        raise gr.Error("Invalid input (file content must be more than 150 words).")
        # print("Number of words:", words)

    elif words_subject < 1:
        raise gr.Error("Invalid input (title must contain at least one word).")

    else:
        generated = generate_question(text, subject, total)
        # return text
        # mycursor = mydb.cursor()

        # timedate = datetime.datetime.now()

        # sql = "INSERT INTO mcqsfiles (subject, input, output, timedate) VALUES (%s, %s, %s, %s)"
        # val = (subject, text, generated, timedate)
        # mycursor.execute(sql, val)

        # mydb.commit()

        # print(mycursor.rowcount, "record inserted.")

        return generated

# filecreate(file,2)

context = gr.HTML(label="Text")
file = gr.File(label="Upload your file (file must contain more than 150 words).")
total = gr.Slider(1, 10, value=1, step=1, label="Total Number Of Questions")
subject = gr.Textbox(placeholder="Enter subject/title here...", label="Enter your title (title must contain at least 1 word).")


fface = gr.Interface(
    fn=filecreate,
    inputs=[file, subject, total],
    outputs=context,
    # css=".gradio-container {background-image: url('file=blue.jpg')}",
    allow_flagging="never", flagging_options=["Save Data"])

# fface.launch(debug=True, share=True)

demo = gr.TabbedInterface([iface, fface], ["Text", "Upload File"])
demo.launch(debug=True, show_api=False)