BeastGokul's picture
Update app.py
69cfff4 verified
raw
history blame
1.48 kB
import os

import gradio as gr
from huggingface_hub import InferenceClient
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
# Remote inference client for the HF serverless API; requires the HF_TOKEN
# secret to be set on the Space (os.getenv returns None otherwise and the
# client will fail to authenticate).
client = InferenceClient(api_key=(os.getenv('HF_TOKEN')))
# Tokenizer for the fine-tuned Bio-Mistral checkpoint.
tokenizer = AutoTokenizer.from_pretrained("BeastGokul/Bio-Mistral-7B-finetuned")
# NOTE(review): this downloads a full 7B checkpoint into the Space's memory,
# but `base_model` is never used after the resize below, and the imported
# PeftModel/PeftConfig are never applied — presumably a LoRA adapter was meant
# to be merged here; confirm intent before removing.
base_model = AutoModelForCausalLM.from_pretrained("BeastGokul/Bio-Mistral-7B-finetuned")
base_model.resize_token_embeddings(len(tokenizer))
# NOTE(review): a second 7B checkpoint is loaded locally, yet inference below
# goes through the remote InferenceClient, which expects a model *id string*,
# not a loaded model object. Passing this object as `model=` will not work;
# the string "mistralai/Mistral-7B-Instruct-v0.3" should be used instead.
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
def get_model_response(user_query: str) -> str:
    """Send a biomedical query to the hosted chat model and return its reply.

    Parameters
    ----------
    user_query : str
        The user's question, forwarded as a single user turn.

    Returns
    -------
    str
        The generated reply text.

    Raises
    ------
    huggingface_hub.errors.HfHubHTTPError
        If the Inference API call fails (bad token, model unavailable, ...).
    """
    # Only the user turn is sent; the API generates the assistant turn.
    # (The original code appended a `{"role": "biomedical assistant", ...}`
    # placeholder — "biomedical assistant" is not a valid chat role; valid
    # roles are "system", "user", and "assistant" — and was also missing a
    # comma between the two dicts, which was a syntax error.)
    messages = [
        {"role": "user", "content": user_query},
    ]
    # The serverless chat API expects a model *id string*, not a loaded
    # transformers model object, and the client exposes the OpenAI-style
    # `chat.completions.create` (there is no `chat_completions` attribute).
    response = client.chat.completions.create(
        model="mistralai/Mistral-7B-Instruct-v0.3",
        messages=messages,
        max_tokens=500,
    )
    # Each choice carries a message object; its text is the `content` attribute.
    return response.choices[0].message.content
# --- Gradio UI: one query box, a submit button, and a read-only answer box ---
with gr.Blocks() as demo:
    gr.Markdown("# Biomedical Query Chatbot")
    with gr.Row():
        query_box = gr.Textbox(
            label="Your Query",
            placeholder="Enter your biomedical query...",
            lines=1,
        )
        ask_button = gr.Button("Submit")
    answer_box = gr.Textbox(label="Response from Biomedical Assistant")
    # Clicking the button forwards the query to the inference helper and
    # shows its return value in the answer box.
    ask_button.click(get_model_response, inputs=query_box, outputs=answer_box)

# Launch the app
demo.launch()