AjayP13 committed
Commit 0872eb7 (verified) · Parent: 91a3637

Update app.py

Files changed (1)
app.py: +36 -5
app.py CHANGED
@@ -6,8 +6,8 @@ model_name = "google/flan-t5-large"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
-def concatenate_and_generate(text1, text2, reranking, temperature, top_p):
-    concatenated_text = text1 + " " + text2
+def concatenate_and_generate(source_text, target_example_texts, reranking, temperature, top_p):
+    concatenated_text = source_text + " " + target_example_texts
     inputs = tokenizer(concatenated_text, return_tensors="pt")
 
     # Generate the output with specified temperature and top_p
@@ -22,20 +22,51 @@ def concatenate_and_generate(text1, text2, reranking, temperature, top_p):
     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
     return generated_text
 
+# Preset examples with cached generations
+preset_examples = [
+    {
+        "source_text": "Once upon a time in a small village",
+        "target_example_texts": "In a land far away, there was a kingdom ruled by a wise king. Every day, the people of the kingdom would gather to listen to the king's stories, which were full of wisdom and kindness.",
+        "reranking": 5,
+        "temperature": 1.0,
+        "top_p": 1.0,
+        "output": "Once upon a time in a small village in a land far away, there was a kingdom ruled by a wise king. Every day, the people of the kingdom would gather to listen to the king's stories, which were full of wisdom and kindness."
+    },
+    {
+        "source_text": "The quick brown fox",
+        "target_example_texts": "A nimble, chocolate-colored fox swiftly darted through the emerald forest, weaving between trees with grace and agility.",
+        "reranking": 5,
+        "temperature": 0.9,
+        "top_p": 0.9,
+        "output": "The quick brown fox, a nimble, chocolate-colored fox, swiftly darted through the emerald forest, weaving between trees with grace and agility."
+    }
+]
+
 # Define Gradio interface
 with gr.Blocks(theme="ParityError/[email protected]") as demo:
     gr.Markdown("# TinyStyler Demo")
     gr.Markdown("Style transfer the source text into the target style, given some example texts of the target style. You can adjust re-ranking and top_p to your desire to control the quality of style transfer. A higher re-ranking value will generally result in better results, at slower speed.")
 
-    text1 = gr.Textbox(lines=3, placeholder="Enter the source text to transform into the target style...", label="Source Text")
-    text2 = gr.Textbox(lines=5, placeholder="Enter example texts of the target style (one per line)...", label="Example Texts of the Target Style")
+    source_text = gr.Textbox(lines=3, placeholder="Enter the source text to transform into the target style...", label="Source Text")
+    target_example_texts = gr.Textbox(lines=5, placeholder="Enter example texts of the target style (one per line)...", label="Example Texts of the Target Style")
     reranking = gr.Slider(1, 10, value=5, step=1, label="Re-ranking")
     temperature = gr.Slider(0.0, 2.0, value=1.0, step=0.1, label="Temperature")
     top_p = gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="Top-P")
 
     output = gr.Markdown(label="Output")
+
+    def set_example(example):
+        return example["source_text"], example["target_example_texts"], example["reranking"], example["temperature"], example["top_p"], example["output"]
+
+    example_dropdown = gr.Dropdown(label="Preset Examples", choices=[f"Example {i+1}" for i in range(len(preset_examples))])
+    example_button = gr.Button("Load Example")
+    example_button.click(
+        lambda example_index: set_example(preset_examples[int(example_index.split()[-1])-1]),
+        inputs=[example_dropdown],
+        outputs=[source_text, target_example_texts, reranking, temperature, top_p, output]
+    )
 
     btn = gr.Button("Generate")
-    btn.click(concatenate_and_generate, [text1, text2, reranking, temperature, top_p], output)
+    btn.click(concatenate_and_generate, [source_text, target_example_texts, reranking, temperature, top_p], output)
 
     demo.launch()
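
Note: the hunk context skips the body of the generation step (lines 14-21 of app.py are outside this diff), so how the Temperature and Top-P sliders reach the model is not visible here. Purely as a hedged, illustrative sketch of the standard transformers sampling API, not the repository's actual code, those two values would typically be passed to generate() along these lines:

# Hypothetical sketch only, not the elided lines of app.py.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

inputs = tokenizer("Once upon a time in a small village", return_tensors="pt")
output = model.generate(
    **inputs,
    do_sample=True,      # sampling must be on for temperature/top_p to take effect
    temperature=1.0,     # value of the Temperature slider
    top_p=1.0,           # value of the Top-P slider
    max_new_tokens=128,  # assumed cap; the app's real limit is not shown in the diff
)
# How the Re-ranking slider is used (e.g. sampling several candidates and
# re-scoring them) is also not visible in this diff.
print(tokenizer.decode(output[0], skip_special_tokens=True))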
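
On the new preset-example wiring: the Dropdown's choices are the strings "Example 1" and "Example 2", and the lambda passed to example_button.click parses the trailing number to index into preset_examples before set_example fans the fields out to the six output components. A minimal, self-contained sketch of that lookup (names mirror the diff; the example list is truncated here):

# Hypothetical standalone illustration of the "Load Example" lookup above.
preset_examples = [
    {"source_text": "Once upon a time in a small village"},
    {"source_text": "The quick brown fox"},
]

def resolve_choice(choice: str) -> dict:
    # "Example 2" -> 2 -> zero-based index 1, as in the lambda passed to example_button.click
    return preset_examples[int(choice.split()[-1]) - 1]

print(resolve_choice("Example 2")["source_text"])  # The quick brown fox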