import gradio as gr
from transformers import pipeline
from fine_tune_hf import FinetuneHFModel

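# Fine-tuning helper from fine_tune_hf; the "Fine Tune" option in the UI below
# is not wired to it in this file.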
fine_tune_model = FinetuneHFModel()

playground = gr.Blocks()


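# Show the pipeline controls row only when "Use Pipeline" is selected.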
def review_training_choices(choice):
    if choice == "Use Pipeline":
        return gr.Row(visible=True)
    return gr.Row(visible=False)


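# The context box is only relevant for question answering, so hide it otherwise.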
def show_optional_fields(task):
    if task == "question-answering":
        return gr.TextArea(visible=True)
    return gr.TextArea(visible=False)


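# Run the selected task through a transformers pipeline and pull the text to
# display out of each task's result structure.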
def test_pipeline(task, model=None, prompt=None, context=None):
    # Build the pipeline, optionally with a user-supplied model checkpoint.
    if model:
        test = pipeline(task, model=model)
    elif task == "ner":
        # Group sub-word tokens into whole entities for readable output.
        test = pipeline(task, grouped_entities=True)
    else:
        test = pipeline(task)

    if task == "question-answering":
        if not context:
            return "Context is required"
        result = test(question=prompt, context=context)
    else:
        result = test(prompt)

    # Each task returns a different structure; extract the text to display.
    match task:
        case "text-generation":
            return gr.TextArea(result[0]["generated_text"])
        case "fill-mask":
            return gr.TextArea(result[0]["sequence"])
        case "summarization":
            return gr.TextArea(result[0]["summary_text"])
        case "ner":
            ner_result = "\n".join(
                f"{k}={v}"
                for item in result
                for k, v in item.items()
                if k not in ("start", "end", "index")
            )
            return gr.TextArea(ner_result)
        case "question-answering":
            return gr.TextArea(result["answer"])


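# Playground layout: Text, Image, and Audio tabs, each with a pipeline/fine-tune
# selector; only the Text tab is wired to test_pipeline so far.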
with playground:
    gr.Markdown("""
                Try your ideas here. Select from the Text, Image, or Audio tabs.
                """)
    with gr.Tabs():
        with gr.TabItem("Text"):
            with gr.Row():
                with gr.Column(scale=4):
                    radio = gr.Radio(
                        ["Use Pipeline", "Fine Tune"],
                        label="Select Use Pipeline to try out HF models or Fine Tune to train on your own datasets",
                        value="Use Pipeline",
                        interactive=True,
                    )
                with gr.Column(scale=1):
                    test_pipeline_button = gr.Button(
                        value="Test", variant="primary", size="sm")
            with gr.Row(visible=True) as use_pipeline:
                with gr.Column():
                    task_dropdown = gr.Dropdown(
                        [
                            ("Text Generation", "text-generation"),
                            ("Fill Mask", "fill-mask"),
                            ("Summarization", "summarization"),
                            ("Named Entity Recognition", "ner"),
                            ("Question Answering", "question-answering"),
                        ],
                        label="task",
                    )
                    model_dropdown = gr.Dropdown(
                        [],
                        label="model",
                        allow_custom_value=True,
                        interactive=True,
                    )
                    prompt_textarea = gr.TextArea(
                        label="prompt",
                        placeholder="Enter your prompt here",
                        text_align="left",
                    )
                    context_for_question_answer = gr.TextArea(
                        label="Context",
                        placeholder="Enter context for your question here",
                        visible=False,
                        interactive=True,
                    )
                    task_dropdown.change(
                        show_optional_fields,
                        inputs=[task_dropdown],
                        outputs=[context_for_question_answer],
                    )
                with gr.Column():
                    text = gr.TextArea(label="Generated Text")
            radio.change(
                review_training_choices, inputs=radio, outputs=use_pipeline
            )
            test_pipeline_button.click(
                test_pipeline,
                inputs=[task_dropdown, model_dropdown,
                        prompt_textarea, context_for_question_answer],
                outputs=text,
            )
        with gr.TabItem("Image"):
            with gr.Row():
                with gr.Column(scale=3):
                    radio = gr.Radio(
                        ["Use Pipeline", "Fine Tune"],
                        label="Select Use Pipeline to try out HF models or Fine Tune to train on your own datasets",
                        value="Use Pipeline",
                        interactive=True,
                    )
                with gr.Column(scale=1):
                    test_pipeline_button = gr.Button(
                        value="Test", variant="primary", size="sm")
        with gr.TabItem("Audio"):
            with gr.Row():
                with gr.Column(scale=3):
                    radio = gr.Radio(
                        ["Use Pipeline", "Fine Tune"],
                        label="Select Use Pipeline to try out HF models or Fine Tune to train on your own datasets",
                        value="Use Pipeline",
                        interactive=True,
                    )
                with gr.Column(scale=1):
                    test_pipeline_button = gr.Button(
                        value="Test", variant="primary", size="sm")

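# share=True also creates a temporary public Gradio link alongside the local server.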
playground.launch(share=True)