import os
import pandas as pd
import gradio as gr
###################################
# 1. Load and Chunk CSV Data
###################################
df = pd.read_csv("datasets.csv")
# We will chunk the rows in groups of 1,000
chunk_size = 1000
context_data = []
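# Each chunk of rows is flattened into one "column: value" text block; every block
# later becomes a single document in the vector store.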
for start_index in range(0, len(df), chunk_size):
    # Collect rows for one chunk
    chunk_rows = df.iloc[start_index : start_index + chunk_size]
    # Build a single text block for these rows
    text_block = ""
    for row_idx in range(len(chunk_rows)):
        row_data = chunk_rows.iloc[row_idx]
        for col_name in df.columns:
            text_block += f"{col_name}: {str(row_data[col_name])} "
        text_block += "\n"  # separate rows for clarity
    context_data.append(text_block)
###################################
# 2. Retrieve API Key
###################################
groq_key = os.environ.get('groq_api_keys')
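# os.environ.get returns None if the variable is unset, so the key must be provided
# in the runtime environment (for example, as a Space secret named 'groq_api_keys').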
###################################
# 3. Language Model & Embeddings
###################################
from langchain_groq import ChatGroq
llm = ChatGroq(
    model="llama-3.1-70b-versatile",
    api_key=groq_key
)
from langchain_huggingface import HuggingFaceEmbeddings
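# The embedding model is downloaded from the Hugging Face Hub on first run and is
# used to encode both the stored chunks and incoming questions.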
embed_model = HuggingFaceEmbeddings(
    model_name="mixedbread-ai/mxbai-embed-large-v1"
)
###################################
# 4. Vector Store
###################################
from langchain_chroma import Chroma
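# persist_directory writes the Chroma collection to disk instead of keeping it
# only in memory.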
vectorstore = Chroma(
    collection_name="professional_medical_store",
    embedding_function=embed_model,
    persist_directory="./",
)
# Add chunked data to the vector store
vectorstore.add_texts(context_data)
retriever = vectorstore.as_retriever()
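# The retriever wraps the vector store's similarity search: given a question, it
# returns the most similar chunks as Documents for the prompt's {context} slot.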
###################################
# 5. Prompt Configuration
###################################
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
prompt_template = """
You are a supportive and professional mental-health consultant with extensive medical knowledge.
Speak compassionately while maintaining a calm, informative tone.
Use the context to answer questions about mental well-being or related medical considerations.
If you do not know the answer, say so without hesitation.
Focus on providing actionable insights without explicitly mentioning the context.
Always encourage users to seek professional or emergency help where appropriate.
Context: {context}
Question: {question}
Answer:
"""
rag_prompt = PromptTemplate.from_template(prompt_template)
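# LCEL pipeline: the dict runs the retriever on the user input to fill {context}
# while RunnablePassthrough forwards the question unchanged; the formatted prompt
# then goes to the LLM and the response is parsed into a plain string.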
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)
###################################
# 6. Gradio Interface
###################################
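# Gradio treats a generator function as a streaming output: yielding the growing
# text replaces the textbox contents on each step, so the answer appears incrementally.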
def chain_stream(user_input):
    partial_text = ""
    for new_text in rag_chain.stream(user_input):
        partial_text += new_text
        yield partial_text
examples = [
    "I have been feeling anxious and unable to focus. Any recommendations?",
    "I've been feeling extremely tired lately—should I see a professional?"
]
disclaimer = (
    "**Disclaimer**: I am an AI language model and not a licensed healthcare professional. "
    "For urgent concerns, please seek immediate help from qualified medical professionals."
)
title = "Professional Mental Health & Medical Assistant"
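# `article` renders the disclaimer below the interface; `examples` pre-fills
# clickable sample questions.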
demo = gr.Interface(
    fn=chain_stream,
    inputs=gr.Textbox(
        lines=3,
        placeholder="Ask your question or describe your situation here..."
    ),
    outputs="text",
    title=title,
    description=(
        "Welcome to your mental-health and medical information companion. "
        "I provide professional, empathetic, and trustworthy guidance. "
        "Please remember this is not a substitute for direct professional consultation."
    ),
    article=disclaimer,
    examples=examples,
    allow_flagging="never",
)
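# Binding to 0.0.0.0 on port 7860 makes the app reachable from outside the
# container, which is the standard setup for a hosted Space.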
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)