import gradio as gr
from transformers import T5ForConditionalGeneration, T5Tokenizer
from textwrap import fill
# Load the finetuned model and tokenizer
last_checkpoint = "model/out/kaggle/working/results/checkpoint-1000"
finetuned_model = T5ForConditionalGeneration.from_pretrained(last_checkpoint)
tokenizer = T5Tokenizer.from_pretrained(last_checkpoint)
def answer_question(question):
    # Prepend the same instruction prompt used during finetuning
    inputs = "Answer this question truthfully: " + question
    tokenized_inputs = tokenizer(inputs, return_tensors="pt", padding=True, truncation=True)
    outputs = finetuned_model.generate(**tokenized_inputs)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Wrap the answer to 80 characters per line for readability
    return fill(answer, width=80)
# Create Gradio interface
iface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text",
    title="Medical Question Answering",
    description="Enter a medical question to get a truthful answer from the finetuned T5 model.",
    examples=[["What is the relationship between very low Mg2+ levels, PTH levels, and Ca2+ levels?"]],
)
# Launch the app
iface.launch()