# Hugging Face Space: GEOSCIENCE EXAM MARKER (Gradio app)
import os

# The Keras backend must be chosen BEFORE keras is imported, so this
# assignment deliberately sits ahead of the keras imports.
os.environ["KERAS_BACKEND"] = "tensorflow"

import json

import gradio as gr
import ipywidgets as widgets  # NOTE(review): unused in this script — confirm before removing
import keras
import keras_nlp
from IPython.display import display  # NOTE(review): unused in this script — confirm before removing
# Page-level CSS: a faint, fixed, full-viewport background GIF rendered
# behind the UI, with the Gradio container centered in the viewport.
css = """
html, body {
    margin: 0;
    padding: 0;
    height: 100%;
    overflow: hidden;
}
body::before {
    content: '';
    position: fixed;
    top: 0;
    left: 0;
    width: 100vw;
    height: 100vh;
    background-image: url('https://github.com/ShebMichel/kagglex_imagebot/blob/main/geoBot_to_github.gif');
    background-size: cover;
    background-repeat: no-repeat;
    opacity: 0.65; /* Faint background image */
    background-position: center;
    z-index: -1; /* Keep the background behind text */
}
.gradio-container {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh; /* Ensure the content is vertically centered */
}
"""
# Hugging Face model presets selectable from the UI dropdown.
available_models = [
    "hf://ShebMichel/geobot_teacher-v0",
    "hf://ShebMichel/geobot_teacher-v1",
    "hf://ShebMichel/geobot_teacher-v2",
]

# Currently selected model preset; rebound by update_first_model() whenever
# the dropdown value changes.
first_model = available_models[0]
def load_model(model_name):
    """Instantiate and return a causal LM from the given Keras/HF preset name.

    Args:
        model_name: Preset identifier (e.g. "hf://ShebMichel/geobot_teacher-v0");
            coerced to str before loading.
    """
    return keras_nlp.models.CausalLM.from_preset(str(model_name))
def model_launch(input):
    """Run the currently selected model on a single instruction string.

    Builds the Instruction/Response prompt, generates up to 1024 tokens, and
    returns only the text that follows the "Response" marker.

    Args:
        input: The instruction/question text from the UI.
    """
    # NOTE(review): this reloads the preset on every call, which is slow;
    # kept as-is because the global model lifecycle lives outside this block.
    geomodel_llm = load_model(first_model)
    prompt = "Instruction:\n{instruction}\n\nResponse:\n{response}".format(
        instruction=input,
        response="",
    )
    out = geomodel_llm.generate(prompt, max_length=1024)
    # Strip everything up to and including the "Response:\n" marker.
    # Bug fix: the original used str.index, which raises ValueError when the
    # marker is absent from the generation — fall back to the full output.
    marker = "Response"
    idx = out.find(marker)
    if idx == -1:
        return out
    return out[idx + len(marker) + 2:]
# Most recently loaded exam JSON; shared with the radio/edit callbacks.
loaded_data = {}


def process_json_file(file):
    """Load an exam JSON file and return rendered text plus widget updates.

    Returns an 8-tuple: the display text followed by seven ``gr.update``
    objects that enable (on success) or disable (on failure) the interactive
    components wired to ``file_input.upload`` in the UI.

    Args:
        file: Gradio file object; only ``file.name`` (the temp path) is read.
    """
    global loaded_data

    def _updates(enabled):
        # One gr.update per interactive component in the upload outputs list.
        return tuple(gr.update(interactive=enabled) for _ in range(7))

    try:
        with open(file.name, 'r') as f:
            loaded_data = json.load(f)
        output_text = generate_output_text(loaded_data)
        print("\nProcessing complete!")
        return (output_text, *_updates(True))
    except json.JSONDecodeError as e:
        error_msg = f"Error decoding JSON: {str(e)}"
    except Exception as e:
        error_msg = f"Error processing file: {str(e)}"
    print(error_msg)
    return (error_msg, *_updates(False))
def generate_output_text(data, section="ALL"):
    """Render the loaded exam dict as display text.

    Args:
        data: Parsed exam JSON with optional 'header',
            'multiple_choice_questions', 'short_answer_questions' and
            'long_answer_questions' keys.
        section: "ALL" or one of the UI radio labels; restricts output to
            that single section.

    Returns:
        The formatted text, or "" when no matching sections are present.
    """
    output_text = ""
    if section in ("ALL", "header") and 'header' in data:
        output_text += "=== EXAM DETAILS ===\n"
        for key, value in data['header'].items():
            output_text += f"{key.replace('_', ' ').title()}: {value}\n"
        output_text += "\n"

    # (ui_label, data_key, leading_newline, has_options) for each question
    # section; the MCQ banner has no leading newline, the others do — this
    # matches the original hand-unrolled formatting exactly.
    question_sections = [
        ("MULTIPLE CHOICE QUESTIONS", 'multiple_choice_questions', "", True),
        ("SHORT ANSWER QUESTIONS", 'short_answer_questions', "\n", False),
        ("LONG ANSWER QUESTIONS", 'long_answer_questions', "\n", False),
    ]
    for label, key, leading, has_options in question_sections:
        if section in ("ALL", label) and key in data:
            output_text += f"{leading}=== {label} ===\n"
            for q_num, q_data in data[key].items():
                output_text += f"\nQuestion {q_num.replace('question', '')}:\n"
                output_text += f"{q_data['question']}\n"
                if has_options:
                    for opt_key, opt_val in q_data['options'].items():
                        output_text += f"{opt_key}) {opt_val}\n"
                output_text += f"Answer: {q_data['answer']}\n"
    return output_text
def update_textbox_based_on_radio(selection):
    """Re-render the processed-questions textbox for the chosen section."""
    section_text = generate_output_text(loaded_data, section=selection)
    return section_text
# Maps each radio label to the JSON key holding that category's questions.
_SECTION_KEYS = {
    "MULTIPLE CHOICE QUESTIONS": 'multiple_choice_questions',
    "SHORT ANSWER QUESTIONS": 'short_answer_questions',
    "LONG ANSWER QUESTIONS": 'long_answer_questions',
}


def set_question_to_edit(selection):
    """Return the first question of the selected category for the edit box.

    Returns "" for the "ALL" selection, for categories missing from the
    loaded data, and — unlike the original, which raised IndexError — for
    categories whose question dict is empty.
    """
    key = _SECTION_KEYS.get(selection)
    if key and loaded_data.get(key):
        first_entry = next(iter(loaded_data[key].values()))
        return first_entry.get('question', '')
    return ""
def submit_question(question_content):
    """Send the (possibly edited) question to the selected model; return its answer.

    Bug fix: the original referenced the undefined names ``geomodel_llm``
    and ``model_output``, so every call raised NameError. The model is now
    loaded via load_model() and the generated text itself is sliced.
    (This handler is currently unused — the Submit button is wired to
    model_launch — but it is kept working for parity with that function.)
    """
    geomodel_llm = load_model(first_model)
    prompt = "Instruction:\n{instruction}\n\nResponse:\n{response}".format(
        instruction=question_content,
        response="",
    )
    out = geomodel_llm.generate(prompt, max_length=1024)
    # Skip past the "Response" marker plus the ":\n" that follows it.
    ind = out.index('Response') + len('Response') + 2
    return out[ind:]
def display_selected_model(selected_model):
    """Format the chosen model preset for display in the UI."""
    label = "Selected Model: "
    return label + str(selected_model)
def update_first_model(selected_model):
    """Rebind the module-level ``first_model`` to the dropdown choice and echo it."""
    global first_model
    first_model = selected_model
    return selected_model
def clear_output():
    """Reset the question editor and the model-response box to empty strings."""
    blank = ""
    return blank, blank
# ---------------------------------------------------------------------------
# Gradio UI wiring
# ---------------------------------------------------------------------------
with gr.Blocks() as iface:
    gr.Markdown("# GEOSCIENCE EXAM MARKER")

    with gr.Row():
        file_input = gr.File(
            label="Upload JSON Exam File",
            file_types=[".json"]
        )

    with gr.Row():
        output_text = gr.Textbox(
            label="Processed Questions",
            lines=20,
            max_lines=30
        )

    with gr.Row():
        radio_section = gr.Radio(
            ["MULTIPLE CHOICE QUESTIONS", "SHORT ANSWER QUESTIONS", "LONG ANSWER QUESTIONS", "ALL"],
            label="Select Section to Display",
            interactive=False  # enabled only after a JSON file loads successfully
        )

    # Re-render the questions textbox when the radio selection changes.
    radio_section.change(
        fn=update_textbox_based_on_radio,
        inputs=[radio_section],
        outputs=[output_text]
    )

    with gr.Row():
        with gr.Column(scale=2):
            model_dropdown = gr.Dropdown(
                choices=available_models,
                label="Select Model",
                value=first_model  # initial selection mirrors the global default
            )
            # Echo the current selection so the user sees which preset is active.
            selected_output = gr.Textbox(label="Current Selected Model", value=f"{first_model}")
            # Keep the global `first_model` in sync with the dropdown.
            model_dropdown.change(
                fn=update_first_model,
                inputs=model_dropdown,
                outputs=selected_output
            )

    # Editable question text box and the model's response, side by side.
    with gr.Row():
        question_edit = gr.Textbox(
            label="Edit Selected Question",
            placeholder="Select a section and a question to edit here...",
            lines=4
        )
        model_response = gr.Textbox(
            label="Model Response",
            lines=4,
            interactive=False
        )

    with gr.Row():
        submit_button = gr.Button("Submit", interactive=False)
        clear_button = gr.Button("Clear", interactive=False)

    # Seed the edit box with the first question of the chosen category.
    radio_section.change(
        fn=set_question_to_edit,
        inputs=[radio_section],
        outputs=[question_edit]
    )
    # Submit passes the (edited) question text to the model.
    submit_button.click(
        fn=model_launch,
        inputs=[question_edit],
        outputs=[model_response]
    )
    clear_button.click(
        fn=clear_output,
        outputs=[question_edit, model_response]
    )

    # NOTE(review): this outputs list repeats radio_section four times and
    # submit_button twice so its length matches the 8-tuple returned by
    # process_json_file; later updates simply overwrite earlier ones.
    # Deduplicating it would require shrinking that return tuple in lockstep.
    file_input.upload(
        fn=process_json_file,
        inputs=[file_input],
        outputs=[output_text, radio_section, radio_section, radio_section, radio_section, submit_button, clear_button, submit_button]
    )

# Launch the interface
iface.launch()