import os

import gradio as gr
from huggingface_hub import InferenceClient
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Client for the Hugging Face Inference API; HF_TOKEN must be set in the environment.
client = InferenceClient(api_key=os.getenv("HF_TOKEN"))

# Fine-tuned tokenizer and weights, loaded locally (not used by the API call below).
tokenizer = AutoTokenizer.from_pretrained("BeastGokul/Bio-Mistral-7B-finetuned")
base_model = AutoModelForCausalLM.from_pretrained("BeastGokul/Bio-Mistral-7B-finetuned")
base_model.resize_token_embeddings(len(tokenizer))

# The remote API expects a model id string, not a loaded model object.
MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.3"
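
# A minimal sketch (not part of the original logic): the peft import suggests the
# fine-tuned repo may ship a LoRA adapter. If "BeastGokul/Bio-Mistral-7B-finetuned"
# is an adapter repo rather than full weights, it would attach to its base like so:
def load_peft_adapter():
    peft_config = PeftConfig.from_pretrained("BeastGokul/Bio-Mistral-7B-finetuned")
    base = AutoModelForCausalLM.from_pretrained(peft_config.base_model_name_or_path)
    return PeftModel.from_pretrained(base, "BeastGokul/Bio-Mistral-7B-finetuned")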

def get_model_response(user_query):
    # Chat-format messages. The invalid "biomedical assistant" role is expressed
    # as a system prompt instead; the model itself produces the assistant turn.
    messages = [
        {
            "role": "system",
            "content": "You are a biomedical assistant."
        },
        {
            "role": "user",
            "content": user_query
        }
    ]

    # InferenceClient exposes chat completion as chat_completion(),
    # which takes the model id string rather than a loaded model object.
    response = client.chat_completion(
        model=MODEL_ID,
        messages=messages,
        max_tokens=500
    )

    # Extract and return the response content
    return response.choices[0].message.content
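
# A hedged alternative (a sketch, assuming the tokenizer ships a chat template):
# run the locally loaded fine-tuned weights instead of the remote API.
def get_local_response(user_query):
    prompt = tokenizer.apply_chat_template(
        [{"role": "user", "content": user_query}],
        tokenize=False,
        add_generation_prompt=True,
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = base_model.generate(**inputs, max_new_tokens=500)
    # Decode only the newly generated tokens, skipping the prompt.
    return tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )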

with gr.Blocks() as demo:
    gr.Markdown("# Biomedical Query Chatbot")
    with gr.Row():
        user_input = gr.Textbox(
            placeholder="Enter your biomedical query...",
            label="Your Query",
            lines=1
        )
        submit_button = gr.Button("Submit")
        
    output = gr.Textbox(label="Response from Biomedical Assistant")

    submit_button.click(get_model_response, inputs=user_input, outputs=output)

# Launch the app
demo.launch()
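
# To run locally: set HF_TOKEN to a Hugging Face access token, then `python app.py`.
# (HF_TOKEN is read above when the InferenceClient is created.)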