Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,48 +1,52 @@
|
|
1 |
#import libraries and dependencies
|
2 |
import gradio as gr
|
3 |
-
from gradio.mix import Parallel
|
|
|
4 |
|
5 |
#instantiate variables as strings
|
6 |
-
title="Text Generators"
|
7 |
-
title1="Level 1 Text Generator"
|
8 |
-
title2="Level 3 Text Generator"
|
9 |
-
description1="This is the basic text generator all students were taught to code using an older, smaller language model. Input text, submit, and the text generator will generate one output text instance."
|
10 |
-
description2="This is a more advanced text generator that many students were taught to code. Input text and the text generator generates three output text instances from three language models. Importantly, two of these language models were designed to process explicit instructions."
|
11 |
-
description3="This is the most advanced text generator that a few students were taught to code. Input text and the text generator generates an output text instance. You can resubmit to include that new text as input text."
|
12 |
-
examples = [
|
13 |
-
["Zoe Kwan is a 20-year old singer and songwriter who has taken Hong Kong’s music scene by storm."],
|
14 |
-
["What is this life for?"],
|
15 |
-
["Write a short story."],
|
16 |
-
["
|
17 |
-
|
18 |
-
]
|
19 |
|
20 |
#instantiate variables as functions
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
|
|
|
|
25 |
|
26 |
#togethercomputer/GPT-NeoXT-Chat-Base-20B
|
27 |
#decapoda-research/llama-7b-hf
|
28 |
#define functions
|
29 |
-
def complete_with_gpt(text):
|
30 |
-
# Use the last 50 characters of the text as context
|
31 |
-
return text[:-50] + model4(text[-50:])
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
|
33 |
-
|
34 |
-
with gr.Row():
|
35 |
-
textbox = gr.Textbox(placeholder=description3, lines=8)
|
36 |
-
with gr.Column():
|
37 |
-
btn = gr.Button("Submit")
|
38 |
|
39 |
-
|
|
|
|
|
40 |
|
41 |
-
|
42 |
-
tab2 = gr.Parallel(model1, model2, model3, inputs=gr.Textbox(lines=5, label="Input explicit or implicit instructions"), title=title2, description=description2, examples=examples)
|
43 |
-
tab3 = demo
|
44 |
|
45 |
-
|
|
|
46 |
|
47 |
-
|
48 |
-
demo1.launch(debug=True)
|
|
|
"""Gradio text-generation demo (Hugging Face Space entry point).

Builds a single text-generation pipeline backed by the
``bigscience/bloom-560m`` model and serves it through an
auto-generated Gradio interface.
"""

#import libraries and dependencies
import gradio as gr
from transformers import pipeline

# Candidate models noted during development (not currently used):
#   togethercomputer/GPT-NeoXT-Chat-Base-20B
#   decapoda-research/llama-7b-hf

#instantiate variables as functions
# Build the generation pipeline once at module load so the model is
# downloaded/cached before the UI starts serving requests.
pipe = pipeline("text-generation", model="bigscience/bloom-560m")

# Wrap the pipeline in an auto-generated Gradio interface and serve it.
# NOTE(review): launched at import time, with no __main__ guard — the
# Hugging Face Spaces runtime executes app.py directly, so this is the
# conventional Spaces pattern; confirm before reusing outside Spaces.
gr.Interface.from_pipeline(pipe).launch()