File size: 10,348 Bytes
1f999b3
 
0cb0e01
b549f57
1f999b3
 
1351c73
9ecfb31
77cb3f7
1f999b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78fe05b
1f999b3
 
 
 
 
 
 
 
 
 
 
 
b2701c0
1f999b3
 
4a909dd
 
 
835a0e7
 
e97b0ce
4a909dd
a20ee81
4a909dd
13ad2e3
cbbf105
6ec38f4
cbbf105
4a909dd
b61e085
d12aaa0
 
 
 
 
 
 
 
 
 
abdb6c3
ca8b37e
 
 
 
 
 
 
 
 
 
 
 
fc71b26
 
369498a
1ee0d68
fc71b26
 
 
 
1ee0d68
fc71b26
 
 
1ee0d68
1a5a87f
ca8b37e
 
 
 
 
 
 
 
 
 
562eb9e
ca8b37e
 
 
 
 
 
 
 
 
562eb9e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ca8b37e
 
 
 
61025a3
ca8b37e
 
abdb6c3
 
 
 
 
 
 
 
 
 
 
 
 
369498a
abdb6c3
3f3be5a
 
 
 
 
 
 
 
 
 
369498a
fc3451c
 
 
 
e97b0ce
 
 
 
46f589b
e97b0ce
369498a
abdb6c3
369498a
fc71b26
 
acdd51d
1a5a87f
fc71b26
 
 
 
 
1a5a87f
fc71b26
 
 
 
 
 
1a5a87f
fc71b26
562eb9e
 
ca8b37e
 
 
369498a
ca8b37e
562eb9e
ca8b37e
562eb9e
ca8b37e
 
13ad2e3
 
 
 
 
f3ad576
e97b0ce
7bc6948
13ad2e3
fc3451c
e97b0ce
f3ad576
ea07c49
e97b0ce
 
 
 
 
 
 
fc3451c
abdb6c3
 
 
 
 
 
 
 
 
 
 
 
 
 
8493c36
369498a
795b555
 
 
369498a
 
abdb6c3
 
 
a5c29d1
abdb6c3
 
369498a
a5c29d1
838c132
a5c29d1
369498a
 
 
 
abdb6c3
369498a
 
150b8c0
369498a
fc71b26
 
3b726d6
abdb6c3
fc71b26
0a03f9c
cae0406
fc71b26
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274

import gradio as gr
import os
os.environ["KERAS_BACKEND"] = "tensorflow"                                                                           
import keras
import keras_nlp
import json
import ipywidgets as widgets
from IPython.display import display


# Page styling injected into the Gradio app.
# Fix: the original background-image pointed at the GitHub /blob/ page URL,
# which serves an HTML page rather than the GIF itself, so the background
# never rendered; use the raw content URL instead.
css = """
html, body {
    margin: 0;
    padding: 0;
    height: 100%;
    overflow: hidden;
}
body::before {
    content: '';
    position: fixed;
    top: 0;
    left: 0;
    width: 100vw;
    height: 100vh;
    background-image: url('https://raw.githubusercontent.com/ShebMichel/kagglex_imagebot/main/geoBot_to_github.gif');
    background-size: cover;
    background-repeat: no-repeat;
    opacity: 0.65;             /* Faint background image */
    background-position: center;
    z-index: -1;    /* Keep the background behind text */
}
.gradio-container {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh;  /* Ensure the content is vertically centered */
}
"""


# List of available models
# Hugging Face Hub preset identifiers understood by
# keras_nlp.models.CausalLM.from_preset (the "hf://" scheme).
available_models = [
    "hf://ShebMichel/geobot_teacher-v0",
    "hf://ShebMichel/geobot_teacher-v1",
    "hf://ShebMichel/geobot_teacher-v2"]
# Currently selected model; mutated by update_first_model when the
# dropdown changes, and read by model_launch at generation time.
first_model = available_models[0]  # Set the first item as a global variable

# geomodel_llm = keras_nlp.models.CausalLM.from_preset("hf://ShebMichel/geobot_teacher-v0")
# Function to load the selected model

# Cache of already-loaded models keyed by preset name. model_launch calls
# load_model on every request; without the cache each request re-downloaded
# and rebuilt the model from scratch.
_model_cache = {}

def load_model(model_name):
    """Return the CausalLM for ``model_name``, loading it on first use.

    Parameters:
        model_name: a keras_nlp preset identifier (e.g. "hf://user/repo").

    Returns:
        The (cached) keras_nlp.models.CausalLM instance.
    """
    key = str(model_name)
    if key not in _model_cache:
        _model_cache[key] = keras_nlp.models.CausalLM.from_preset(key)
    return _model_cache[key]

def model_launch(input):
    """Generate a model response for the given instruction text.

    Loads the globally selected model (``first_model``), wraps the
    instruction in the Instruction/Response prompt template, and returns
    only the text generated after the "Response" marker.

    Parameters:
        input: the instruction/question text from the UI.

    Returns:
        The generated answer text (or the raw model output if the
        marker is unexpectedly absent).
    """
    geomodel_llm = load_model(first_model)
    template = "Instruction:\n{instruction}\n\nResponse:\n{response}"
    prompt = template.format(
        instruction=input,
        response="",
    )
    out = geomodel_llm.generate(prompt, max_length=1024)
    # The model echoes the prompt, so the first "Response" occurrence marks
    # the start of the answer; +2 skips the ":\n" that follows it.
    # Fix: the original used str.index, which raised ValueError when the
    # marker was missing from the output — fall back to the raw output.
    marker = 'Response'
    pos = out.find(marker)
    if pos == -1:
        return out
    return out[pos + len(marker) + 2:]

# Global variable to store loaded JSON data
# Written by process_json_file on upload and read by the radio-button
# callbacks (update_textbox_based_on_radio / set_question_to_edit).
# NOTE(review): shared module-level state — not safe if several users share
# this process; confirm single-user deployment.
loaded_data = {}

def _interactivity_updates(enabled):
    """Return the seven gr.update payloads toggling the post-load widgets."""
    return tuple(gr.update(interactive=enabled) for _ in range(7))


def process_json_file(file):
    """Load an uploaded exam JSON file into the module-level ``loaded_data``.

    Parameters:
        file: gradio File object; only its ``.name`` (temp path) is read.

    Returns:
        An 8-tuple: the formatted exam text (or an error message) followed
        by seven gr.update objects that enable (on success) or disable (on
        failure) the dependent widgets, in the order wired up by
        ``file_input.upload``.
    """
    global loaded_data
    try:
        # Read the JSON file from the temp path Gradio saved it to.
        with open(file.name, 'r', encoding='utf-8') as f:
            loaded_data = json.load(f)

        # Render every section of the exam for the main textbox.
        output_text = generate_output_text(loaded_data)

        print("\nProcessing complete!")
        # Enable radio buttons and buttons upon successful loading.
        return (output_text, *_interactivity_updates(True))

    except json.JSONDecodeError as e:
        error_msg = f"Error decoding JSON: {str(e)}"
        print(error_msg)
        return (error_msg, *_interactivity_updates(False))
    except Exception as e:
        error_msg = f"Error processing file: {str(e)}"
        print(error_msg)
        return (error_msg, *_interactivity_updates(False))

def _format_question_section(heading, questions, with_options=False):
    """Render one question section: heading plus each Q/A (and options)."""
    text = heading
    for q_num, q_data in questions.items():
        text += f"\nQuestion {q_num.replace('question', '')}:\n"
        text += f"{q_data['question']}\n"
        if with_options:
            for opt_key, opt_val in q_data['options'].items():
                text += f"{opt_key}) {opt_val}\n"
        text += f"Answer: {q_data['answer']}\n"
    return text


def generate_output_text(data, section="ALL"):
    """Render the exam dict as display text for the main textbox.

    Parameters:
        data: parsed exam JSON; recognized top-level keys are 'header',
            'multiple_choice_questions', 'short_answer_questions' and
            'long_answer_questions' (each question maps to a dict with
            'question', 'answer' and, for MCQs, 'options').
        section: "ALL" (default), "header", or one of the radio-button
            labels; limits output to that section.

    Returns:
        The formatted text; "" when nothing matches.
    """
    output_text = ""

    if section in ("ALL", "header") and 'header' in data:
        output_text += "=== EXAM DETAILS ===\n"
        for key, value in data['header'].items():
            output_text += f"{key.replace('_', ' ').title()}: {value}\n"
        output_text += "\n"

    # NOTE: the MCQ heading has no leading newline; the other two do —
    # preserved from the original layout.
    if section in ("ALL", "MULTIPLE CHOICE QUESTIONS") and 'multiple_choice_questions' in data:
        output_text += _format_question_section(
            "=== MULTIPLE CHOICE QUESTIONS ===\n",
            data['multiple_choice_questions'],
            with_options=True,
        )

    if section in ("ALL", "SHORT ANSWER QUESTIONS") and 'short_answer_questions' in data:
        output_text += _format_question_section(
            "\n=== SHORT ANSWER QUESTIONS ===\n",
            data['short_answer_questions'],
        )

    if section in ("ALL", "LONG ANSWER QUESTIONS") and 'long_answer_questions' in data:
        output_text += _format_question_section(
            "\n=== LONG ANSWER QUESTIONS ===\n",
            data['long_answer_questions'],
        )

    return output_text

def update_textbox_based_on_radio(selection):
    """Render every question of the chosen section for the main textbox."""
    rendered = generate_output_text(loaded_data, section=selection)
    return rendered

def set_question_to_edit(selection):
    """Return the first question of the selected category for editing.

    Reads the module-level ``loaded_data`` dict populated by
    process_json_file.

    Parameters:
        selection: a radio-button label naming the question category.

    Returns:
        The first question's text, or "" when the category is unknown,
        absent, or empty. (The original indexed ``[0]`` unconditionally
        and raised IndexError on an empty category dict.)
    """
    section_keys = {
        "MULTIPLE CHOICE QUESTIONS": "multiple_choice_questions",
        "SHORT ANSWER QUESTIONS": "short_answer_questions",
        "LONG ANSWER QUESTIONS": "long_answer_questions",
    }
    key = section_keys.get(selection)
    if key is None or key not in loaded_data:
        return ""
    # next(..., None) guards against an empty section dict.
    first = next(iter(loaded_data[key].values()), None)
    return first.get('question', '') if first else ""

def submit_question(question_content):
    """Send the edited question text to the currently selected model.

    Fix: the original body referenced two undefined names
    (``geomodel_llm``, never assigned at module level, and
    ``model_output``, only present in a commented-out line), so any call
    raised NameError. Delegate to model_launch, which implements the same
    prompt/generate/strip flow against the globally selected model.

    Parameters:
        question_content: the question text from the edit box.

    Returns:
        The model's generated answer text.
    """
    return model_launch(question_content)

# Render the chosen model id for display in the status textbox.
def display_selected_model(selected_model):
    """Return a human-readable label for the chosen model."""
    label = "Selected Model: {}".format(selected_model)
    return label

# Keep the module-level `first_model` in sync with the dropdown selection.
def update_first_model(selected_model):
    """Store the dropdown choice in the global ``first_model`` and echo it."""
    global first_model
    first_model = selected_model
    return selected_model

def clear_output():
    """Blank out both the question editor and the model-response box."""
    blank = ""
    return blank, blank

# Create Gradio interface
with gr.Blocks() as iface:
    # NOTE(review): the `css` constant defined near the top of the file is
    # never passed in here — presumably this was meant to be
    # gr.Blocks(css=css); confirm intent before changing.
    gr.Markdown("# GEOSCIENCE EXAM MARKER")
    
    # Upload widget for the exam JSON that drives the rest of the UI.
    with gr.Row():
        file_input = gr.File(
            label="Upload JSON Exam File",
            file_types=[".json"]
        )
    
    # Read-only view of the processed exam content.
    with gr.Row():
        output_text = gr.Textbox(
            label="Processed Questions",
            lines=20,
            max_lines=30
        )
    
    # Section selector; stays disabled until a file loads successfully.
    with gr.Row():
        radio_section = gr.Radio(
            ["MULTIPLE CHOICE QUESTIONS", "SHORT ANSWER QUESTIONS", "LONG ANSWER QUESTIONS", "ALL"], 
            label="Select Section to Display",
            interactive=False
        )

    # Update output_text when radio selection changes
    radio_section.change(
        fn=update_textbox_based_on_radio,
        inputs=[radio_section],
        outputs=[output_text]
    )
    ##
    with gr.Row():
        with gr.Column(scale=2):
            # Model picker; its value is mirrored into the global
            # `first_model` that model_launch reads at generation time.
            model_dropdown = gr.Dropdown(
                choices=available_models,
                label="Select Model",
                value=first_model  # Set initial selection to the first model
               # visible=False  # Initially hidden
            )
            # Textbox to display the current selected model
            selected_output = gr.Textbox(label="Current Selected Model", value=f"{first_model}")
            #selected_output = gr.Textbox(label="Current Selected Model", value=f"Selected Model: {first_model}")

            # Update the global `first_model` when dropdown changes and display it
            model_dropdown.change(
                fn=update_first_model,
                inputs=model_dropdown,
                outputs=selected_output
            )
        
    # Editable question text box
    with gr.Row():
        question_edit = gr.Textbox(
            label="Edit Selected Question",
            placeholder="Select a section and a question to edit here...",
            lines=4
        )

    # Output box for the model's response
    model_response = gr.Textbox(
        label="Model Response",
        lines=4,
        interactive=False
    )
    
    # Submit button - Passes selected question to the model
    with gr.Row():
        submit_button = gr.Button("Submit", interactive=False)
        clear_button = gr.Button("Clear", interactive=False)
    
    # Define button actions
    # Second change-handler on the same radio: also seeds the edit box with
    # the first question of the newly selected section.
    radio_section.change(
        fn=set_question_to_edit,
        inputs=[radio_section],
        outputs=[question_edit]
    )

    submit_button.click(
        fn=model_launch,  # submit_question was an earlier handler for this event
        inputs=[question_edit],  # only the edit box; the model comes from the global first_model
        outputs=[model_response] 
    )
    
    clear_button.click(
        fn=clear_output,
        outputs=[question_edit, model_response]
    )

    
    # Process file input and enable radio buttons and buttons on successful load
    # NOTE(review): this outputs list repeats radio_section four times and
    # submit_button twice so its length matches the 8 values returned by
    # process_json_file; one update per distinct component (4 outputs, 4
    # return values) would be cleaner — confirm Gradio tolerates the
    # duplicates in the version pinned for this app.
    file_input.upload(
        fn=process_json_file,
        inputs=[file_input],
        outputs=[output_text, radio_section, radio_section, radio_section, radio_section, submit_button, clear_button, submit_button]
    )

# Launch the interface 
iface.launch()