import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

# Load the fine-tuned tokenizer and the BioMistral base model.
tokenizer = AutoTokenizer.from_pretrained("BeastGokul/Bio-Mistral-7B-finetuned")
base_model = AutoModelForCausalLM.from_pretrained("BioMistral/BioMistral-7B")

# Resize the embedding matrix in case the fine-tuned tokenizer added tokens.
base_model.resize_token_embeddings(len(tokenizer))

# Attach the PEFT adapter weights on top of the base model.
model = PeftModel.from_pretrained(base_model, "BeastGokul/Bio-Mistral-7B-finetuned")


def generate_response(user_query):
    inputs = tokenizer(user_query, return_tensors="pt")
    # max_new_tokens bounds only the generated continuation; max_length would
    # count the prompt tokens too and could truncate long queries.
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Define the Gradio interface
with gr.Blocks() as demo:
    user_input = gr.Textbox(placeholder="Enter your biomedical query...", label="Your Query")
    response = gr.Textbox(label="Response", interactive=False)
    user_input.submit(fn=generate_response, inputs=user_input, outputs=response)

demo.launch()