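"""Streamlit app: SQL-RAG over Hugging Face datasets using CrewAI.

Loads a Hugging Face dataset into a temporary SQLite database and runs a
sequential CrewAI crew (database developer, data analyst, report editor)
backed by a Groq-hosted LLM to answer natural-language queries with
SQL-based retrieval.
"""
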
import streamlit as st
import pandas as pd
import sqlite3
import os
import json
from pathlib import Path
from datetime import datetime, timezone
from crewai import Agent, Crew, Process, Task
from crewai_tools import tool
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq
from langchain.schema.output import LLMResult
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_community.tools.sql_database.tool import (
    InfoSQLDatabaseTool,
    ListSQLDatabaseTool,
    QuerySQLCheckerTool,
    QuerySQLDataBaseTool,
)
from langchain_community.utilities.sql_database import SQLDatabase
from datasets import load_dataset
import tempfile

# The Groq API key is read from Streamlit secrets.
os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")


class Event:
    """Simple record for a single logged LLM event."""

    def __init__(self, event, text):
        self.event = event
        self.timestamp = datetime.now(timezone.utc).isoformat()
        self.text = text


class LLMCallbackHandler(BaseCallbackHandler):
    """Append every LLM prompt and completion to a JSONL log file."""

    def __init__(self, log_path: Path):
        self.log_path = log_path

    def on_llm_start(self, serialized, prompts, **kwargs):
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_start", "text": prompts[0], "timestamp": datetime.now().isoformat()}) + "\n")

    def on_llm_end(self, response: LLMResult, **kwargs):
        generation = response.generations[-1][-1].message.content
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_end", "text": generation, "timestamp": datetime.now().isoformat()}) + "\n")


# Groq-hosted LLM, with all prompts and responses logged to prompts.jsonl
llm = ChatGroq(
    temperature=0,
    model_name="mixtral-8x7b-32768",
    callbacks=[LLMCallbackHandler(Path("prompts.jsonl"))],
)
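
# Streamlit UI: dataset selection, query input, and report display.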
st.title("SQL-RAG using CrewAI πŸš€")
st.write("Analyze and summarize Hugging Face datasets using natural language queries with SQL-based retrieval.")
default_dataset = "datascience/ds-salaries"
st.text("Example dataset: `datascience/ds-salaries` (You can enter your own dataset name)")
dataset_name = st.text_input("Enter Hugging Face dataset name:", value=default_dataset)

if dataset_name:
    with st.spinner("Loading dataset..."):
        try:
            # Load the dataset and show a preview
            dataset = load_dataset(dataset_name, split="train")
            df = pd.DataFrame(dataset)
            st.success(f"Dataset '{dataset_name}' loaded successfully!")
            st.write("Preview of the dataset:")
            st.dataframe(df.head())

            # Materialize the dataframe as a SQLite table in a temporary directory
            temp_dir = tempfile.TemporaryDirectory()
            db_path = os.path.join(temp_dir.name, "data.db")
            connection = sqlite3.connect(db_path)
            df.to_sql("data_table", connection, if_exists="replace", index=False)
            db = SQLDatabase.from_uri(f"sqlite:///{db_path}")
@tool("list_tables")
def list_tables() -> str:
return ListSQLDatabaseTool(db=db).invoke("")
@tool("tables_schema")
def tables_schema(tables: str) -> str:
return InfoSQLDatabaseTool(db=db).invoke(tables)
@tool("execute_sql")
def execute_sql(sql_query: str) -> str:
return QuerySQLDataBaseTool(db=db).invoke(sql_query)
@tool("check_sql")
def check_sql(sql_query: str) -> str:
return QuerySQLCheckerTool(db=db, llm=llm).invoke({"query": sql_query})

            # Agents for the crew. Note: crewai's Agent requires role, goal, and
            # backstory, so short backstories are provided alongside each role.
            sql_dev = Agent(
                role="Database Developer",
                goal="Extract data from the database.",
                backstory="An experienced SQL developer who writes and runs correct, efficient queries.",
                llm=llm,
                tools=[list_tables, tables_schema, execute_sql, check_sql],
                allow_delegation=False,
            )

            data_analyst = Agent(
                role="Data Analyst",
                goal="Analyze and provide insights.",
                backstory="A data analyst who draws clear, well-supported insights from query results.",
                llm=llm,
                allow_delegation=False,
            )

            report_writer = Agent(
                role="Report Editor",
                goal="Summarize the analysis.",
                backstory="An editor who condenses analyses into concise, readable reports.",
                llm=llm,
                allow_delegation=False,
            )

            # Tasks, chained sequentially: extract -> analyze -> report
            extract_data = Task(
                description="Extract data required for the query: {query}.",
                expected_output="Database result for the query",
                agent=sql_dev,
            )

            analyze_data = Task(
                description="Analyze the data for: {query}.",
                expected_output="Detailed analysis text",
                agent=data_analyst,
                context=[extract_data],
            )

            write_report = Task(
                description="Summarize the analysis into a short report.",
                expected_output="Markdown report",
                agent=report_writer,
                context=[analyze_data],
            )

            crew = Crew(
                agents=[sql_dev, data_analyst, report_writer],
                tasks=[extract_data, analyze_data, write_report],
                process=Process.sequential,
                verbose=2,
                memory=False,
            )

            query = st.text_input("Enter your query:", placeholder="e.g., 'How does salary vary by company size?'")
            if query:
                with st.spinner("Processing your query..."):
                    inputs = {"query": query}
                    result = crew.kickoff(inputs=inputs)

                st.markdown("### Analysis Report:")
                st.markdown(result)

                # Remove the temporary SQLite database once the report is shown
                temp_dir.cleanup()

        except Exception as e:
            st.error(f"Error loading dataset: {e}")