# app.py — Hugging Face Space: RAG-based mental-health assistant
# (Gradio UI + LangChain retrieval chain + Chroma vector store + Groq LLM)
import os
import pandas as pd
import gradio as gr
###################################
# 1. Load and Chunk CSV Data
###################################
def _rows_to_text_chunks(frame, chunk_size=1000):
    """Serialize a DataFrame into plain-text blocks of up to *chunk_size* rows.

    Each row is rendered as ``"col: value "`` pairs (one pair per column)
    and terminated with a newline; a chunk is the concatenation of its
    rows' lines. The output format matches what the vector store indexes.

    Args:
        frame: pandas DataFrame to serialize.
        chunk_size: maximum number of rows folded into one text block.

    Returns:
        list[str]: one text block per chunk of rows (empty list for an
        empty frame).
    """
    chunks = []
    for start in range(0, len(frame), chunk_size):
        window = frame.iloc[start : start + chunk_size]
        # Build each row's line in one pass, then join — avoids the
        # quadratic cost of repeated string `+=` on large frames.
        block = "".join(
            "".join(f"{col}: {row[col]} " for col in frame.columns) + "\n"
            for _, row in window.iterrows()
        )
        chunks.append(block)
    return chunks


df = pd.read_csv("datasets.csv")
# One text block per 1,000 rows; these become the retrieval corpus.
context_data = _rows_to_text_chunks(df)
###################################
# 2. Retrieve API Key
###################################
# The Groq API key is injected via the Space's secrets. Warn at startup
# if it is missing instead of failing cryptically inside the first LLM
# call (ChatGroq would otherwise receive api_key=None).
groq_key = os.environ.get('groq_api_keys')
if not groq_key:
    print("WARNING: environment variable 'groq_api_keys' is not set; "
          "Groq API calls will fail.")
###################################
# 3. Language Model & Embeddings
###################################
from langchain_groq import ChatGroq
# Groq-hosted chat model used as the generator in the RAG chain below.
llm = ChatGroq(
    model="llama-3.1-70b-versatile",
    api_key=groq_key
)
from langchain_huggingface import HuggingFaceEmbeddings
# Sentence-embedding model used to vectorize the CSV chunks for retrieval.
# NOTE(review): the model weights are downloaded on first run — confirm the
# Space has network access / a cached copy.
embed_model = HuggingFaceEmbeddings(
    model_name="mixedbread-ai/mxbai-embed-large-v1"
)
###################################
# 4. Vector Store
###################################
from langchain_chroma import Chroma
# Chroma collection persisted in the working directory; texts added below
# are embedded with embed_model at insert time.
vectorstore = Chroma(
    collection_name="professional_medical_store",
    embedding_function=embed_model,
    persist_directory="./",
)
# Add chunked data to the vector store.
# NOTE(review): this re-embeds and re-inserts every chunk on each startup,
# so the persisted collection accumulates duplicates across restarts —
# consider checking whether the collection is already populated.
vectorstore.add_texts(context_data)
# Default retriever (similarity search) consumed by the RAG chain.
retriever = vectorstore.as_retriever()
###################################
# 5. Prompt Configuration
###################################
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
# Instruction prompt: {context} is filled with the retrieved chunks and
# {question} with the raw user input.
prompt_template = """
You are a supportive and professional mental-health consultant with extensive medical knowledge.
Speak compassionately while maintaining a calm, informative tone.
Use the context to answer questions about mental well-being or related medical considerations.
If you do not know the answer, say so without hesitation.
Focus on providing actionable insights without explicitly mentioning the context.
Always encourage users to seek professional or emergency help where appropriate.
Context: {context}
Question: {question}
Answer:
"""
rag_prompt = PromptTemplate.from_template(prompt_template)
# LCEL pipeline: fetch context for the query while passing the question
# through unchanged, format the prompt, call the LLM, then parse the
# model output down to a plain string.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)
###################################
# 6. Gradio Interface
###################################
def chain_stream(user_input):
    """Run the RAG chain in streaming mode, yielding the cumulative answer.

    Gradio re-renders the output component with each yielded value, so
    every yield carries the full text produced so far rather than only
    the newest token.
    """
    answer_so_far = ""
    for token in rag_chain.stream(user_input):
        answer_so_far = answer_so_far + token
        yield answer_so_far
# Example prompts shown beneath the input box.
examples = [
    "I have been feeling anxious and unable to focus. Any recommendations?",
    "I've been feeling extremely tired lately—should I see a professional?"
]
# Rendered below the interface via the `article` slot.
disclaimer = (
    "**Disclaimer**: I am an AI language model and not a licensed healthcare professional. "
    "For urgent concerns, please seek immediate help from qualified medical professionals."
)
title = "Professional Mental Health & Medical Assistant"
# A generator `fn` makes Gradio stream: the text output is replaced with
# each value chain_stream yields.
demo = gr.Interface(
    fn=chain_stream,
    inputs=gr.Textbox(
        lines=3,
        placeholder="Ask your question or describe your situation here..."
    ),
    outputs="text",
    title=title,
    description=(
        "Welcome to your mental-health and medical information companion. "
        "I provide professional, empathetic, and trustworthy guidance. "
        "Please remember this is not a substitute for direct professional consultation."
    ),
    article=disclaimer,
    examples=examples,
    # NOTE(review): `allow_flagging` is deprecated in Gradio 4.x in favor of
    # `flagging_mode` — confirm against the pinned Gradio version.
    allow_flagging="never",
)
# Bind to all interfaces on port 7860 — the standard host/port contract
# for Hugging Face Spaces containers.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)