AjayP13 committed
Commit 039d611 · verified · 1 Parent(s): 0e6f965

Create app.py

Files changed (1)
app.py +40 -0
app.py ADDED
@@ -0,0 +1,40 @@
+ import gradio as gr
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+ # Load the model and tokenizer
+ model_name = "google/flan-t5-large"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+
+ def concatenate_and_generate(text1, text2, temperature, top_p):
+     concatenated_text = text1 + " " + text2
+     inputs = tokenizer(concatenated_text, return_tensors="pt")
+
+     # Generate the output with the requested temperature and top_p
+     output = model.generate(
+         inputs["input_ids"],
+         do_sample=True,
+         temperature=temperature,
+         top_p=top_p,
+         max_length=100,
+     )
+
+     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+     return generated_text
+
+ # Define the Gradio interface
+ inputs = [
+     gr.Textbox(lines=2, placeholder="Enter first text here...", label="First text"),
+     gr.Textbox(lines=2, placeholder="Enter second text here...", label="Second text"),
+     gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature"),
+     gr.Slider(0.1, 1.0, value=0.9, step=0.1, label="Top-p"),
+ ]
+ outputs = gr.Textbox(label="Generated text")
+
+ gr.Interface(
+     fn=concatenate_and_generate,
+     inputs=inputs,
+     outputs=outputs,
+     title="Text Concatenation and Generation with FLAN-T5",
+     description="Concatenate two input texts and generate an output using google/flan-t5-large. Adjust the temperature and top_p parameters for different generation behaviors.",
+ ).launch()
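
For a quick sanity check of the sampling settings outside the Space UI, the sketch below reproduces the same transformers generate call directly. It is a minimal sketch, not part of the commit: google/flan-t5-small and the example prompt are stand-ins chosen only to keep the check lightweight.

# Minimal standalone sketch of the generation step used in app.py.
# Assumption: flan-t5-small is loaded here only to keep the download small;
# the Space itself loads google/flan-t5-large.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

# Two concatenated inputs, mirroring what the Gradio app does
prompt = "Answer the question:" + " " + "What is the capital of France?"
inputs = tokenizer(prompt, return_tensors="pt")

output = model.generate(
    inputs["input_ids"],
    do_sample=True,      # sampling must be enabled for temperature/top_p to take effect
    temperature=0.7,
    top_p=0.9,
    max_length=100,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))

Higher temperature or top_p values produce more diverse samples; lower values push the output toward greedy decoding.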