BeastGokul committed on
Commit e7ed7d1 · verified · 1 Parent(s): 67b0b7b

Update app.py

Files changed (1)
  1. app.py +44 -25
app.py CHANGED
@@ -6,48 +6,66 @@ For more information on `huggingface_hub` Inference API support, please check th
6
  """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
- # Load the model and tokenizer manually
10
  model_name = "BeastGokul/Bio-Medical-MultiModal-Llama-3-8B-Finetuned"
11
  tokenizer = AutoTokenizer.from_pretrained(model_name)
12
  model = AutoModelForCausalLM.from_pretrained(model_name)
13
 
14
- # Create a pipeline using the manually loaded model and tokenizer
15
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
16
-
17
 
18
  def generate_response(chat_history, max_length, temperature, top_p):
19
- conversation = "\n".join([f"User: {msg[0]}\nModel: {msg[1]}" for msg in chat_history if msg[1]])
20
- input_text = f"{conversation}\nUser: {chat_history[-1][0]}\nModel:"
21
-
22
- # Use the pipeline to generate a response
23
- generated_text = pipe(
24
- input_text,
25
- max_length=max_length,
26
- temperature=temperature,
27
- top_p=top_p,
28
- num_return_sequences=1
29
- )[0]["generated_text"]
30
-
31
- # Extract only the model's response after "Model:"
32
- response_text = generated_text.split("Model:")[-1].strip()
33
- chat_history[-1] = (chat_history[-1][0], response_text)
34
  return chat_history, chat_history
35
 
36
- with gr.Blocks() as interface:
37
- gr.Markdown("# Biomedical AI Chat Interface")
38
-
 
 
 
 
 
 
 
 
 
 
 
 
39
  chat_history = gr.State([])
40
 
41
  with gr.Row():
42
- user_input = gr.Textbox(placeholder="Enter your biomedical query...")
43
- chat_display = gr.Chatbox(label="Chat History")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
  with gr.Row():
46
  max_length = gr.Slider(50, 500, value=200, step=10, label="Response Length")
47
  temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
48
  top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.1, label="Top-p")
49
 
50
- send_button = gr.Button("Send")
 
 
 
 
51
 
52
  def add_user_message(user_message, chat_history):
53
  chat_history.append((user_message, ""))
@@ -68,5 +86,6 @@ with gr.Blocks() as interface:
68
  interface.launch()
69
 
70
 
 
71
  if __name__ == "__main__":
72
  demo.launch()
 
6
  """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
+
10
  model_name = "BeastGokul/Bio-Medical-MultiModal-Llama-3-8B-Finetuned"
11
  tokenizer = AutoTokenizer.from_pretrained(model_name)
12
  model = AutoModelForCausalLM.from_pretrained(model_name)
13
 
 
 
 
14
 
15
  def generate_response(chat_history, max_length, temperature, top_p):
16
+ conversation = "\n".join([f"User: {msg[0]}\nModel: {msg[1]}" for msg in chat_history])
17
+ inputs = tokenizer(conversation, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
18
+ output = model.generate(**inputs, max_length=max_length, temperature=temperature, top_p=top_p, do_sample=True)
19
+ response_text = tokenizer.decode(output[0], skip_special_tokens=True).split("Model:")[-1].strip()
20
+ chat_history.append((chat_history[-1][0], response_text))
 
 
 
 
 
 
 
 
 
 
21
  return chat_history, chat_history
22
 
23
+ with gr.Blocks(css="""
24
+ .chatbox { max-height: 600px; overflow-y: auto; background-color: #f8f9fa; border: 1px solid #e0e0e0; padding: 10px; border-radius: 8px; }
25
+ .message { padding: 8px; margin: 4px 0; border-radius: 6px; }
26
+ .user-message { background-color: #cce5ff; text-align: left; }
27
+ .model-message { background-color: #e2e3e5; text-align: left; }
28
+ """) as interface:
29
+ gr.Markdown(
30
+ """
31
+ <h1 style="text-align:center; color: #2c3e50;">Biomedical AI Chat Interface</h1>
32
+ <p style="text-align:center; color: #34495e;">
33
+ Ask any biomedical or health-related questions to interact with the AI.
34
+ </p>
35
+ """
36
+ )
37
+
38
  chat_history = gr.State([])
39
 
40
  with gr.Row():
41
+ user_input = gr.Textbox(
42
+ lines=2,
43
+ placeholder="Type your biomedical query here...",
44
+ label="Your Message",
45
+ elem_id="user-input",
46
+ container=False
47
+ )
48
+ chat_display = gr.Chatbox(label="Chat History", elem_id="chatbox", css_class="chatbox")
49
+
50
+ example_queries = [
51
+ "What are the common symptoms of diabetes?",
52
+ "Explain the function of hemoglobin.",
53
+ "How does insulin work in the body?",
54
+ "What are the side effects of chemotherapy?",
55
+ "Can you explain the process of DNA replication?"
56
+ ]
57
+ user_input.style(placeholder="Enter your biomedical query...")
58
 
59
  with gr.Row():
60
  max_length = gr.Slider(50, 500, value=200, step=10, label="Response Length")
61
  temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
62
  top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.1, label="Top-p")
63
 
64
+ send_button = gr.Button("Send", elem_id="send-button")
65
+
66
+ with gr.Row():
67
+ for query in example_queries:
68
+ gr.Button(query).click(fn=lambda q=query: (q, []), outputs=[user_input, chat_history])
69
 
70
  def add_user_message(user_message, chat_history):
71
  chat_history.append((user_message, ""))
 
86
  interface.launch()
87
 
88
 
89
+
90
  if __name__ == "__main__":
91
  demo.launch()