Create main.py
main.py
ADDED
@@ -0,0 +1,113 @@
import os
import torch
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.memory import ConversationBufferMemory
import pandas as pd
from sqlalchemy import create_engine, text
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart

# Set up the open-source LLM (cached so Streamlit doesn't reload it on every rerun)
@st.cache_resource
def load_model():
    model_name = "google/flan-t5-large"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    pipe = pipeline(
        "text2text-generation",
        model=model,
        tokenizer=tokenizer,
        max_length=512
    )
    return HuggingFacePipeline(pipeline=pipe)

local_llm = load_model()

# Set up the database connection
db_connection_string = "sqlite:///leads.db"  # Replace with your actual database connection string
engine = create_engine(db_connection_string)

# Define the tools for the agent
def search_leads(query):
    # Parameterised LIKE query so the user-supplied name can't inject SQL
    df = pd.read_sql(
        text("SELECT * FROM leads WHERE name LIKE :pattern"),
        engine,
        params={"pattern": f"%{query}%"}
    )
    return df.to_dict(orient='records')

def send_email(to_email, subject, body):
    # For demo purposes we just display the email details; a real deployment
    # would send via smtplib/MIMEText (imported above)
    st.write(f"Email sent to: {to_email}")
    st.write(f"Subject: {subject}")
    st.write(f"Body: {body}")
    return "Email sent successfully"

tools = [
    Tool(
        name="Search Leads",
        func=search_leads,
        description="Useful for searching leads in the database"
    ),
    Tool(
        name="Send Email",
        func=send_email,
        description="Useful for sending emails to leads"
    )
]

# Set up the agent
prefix = """You are an AI CyberSecurity Program Advisor. Your goal is to engage with leads and get them to book a video call for an in-person sales meeting. You have access to a database of leads and can send emails.

You have access to the following tools:"""

suffix = """Begin!

{chat_history}
Human: {human_input}
AI: Let's approach this step-by-step:
{agent_scratchpad}"""

prompt = ZeroShotAgent.create_prompt(
    tools,
    prefix=prefix,
    suffix=suffix,
    input_variables=["human_input", "chat_history", "agent_scratchpad"]
)

llm_chain = LLMChain(llm=local_llm, prompt=prompt)
memory = ConversationBufferMemory(memory_key="chat_history")

agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True, memory=memory
)

# Streamlit interface
st.title("AI CyberSecurity Program Advisor Demo")

st.write("This demo showcases an AI agent that can engage with leads and attempt to book video calls for in-person sales meetings.")

lead_name = st.text_input("Enter a lead's name to engage with:")

if lead_name:
    lead_info = search_leads(lead_name)
    if not lead_info:
        st.write(f"No lead found with the name {lead_name}")
    else:
        lead = lead_info[0]
        st.write(f"Lead found: {lead['name']} (Email: {lead['email']})")

        initial_message = f"Hello {lead['name']}, I'd like to discuss our cybersecurity program with you. Are you available for a quick video call?"

        if st.button("Engage with Lead"):
            with st.spinner("AI is generating a response..."):
                response = agent_executor.run(initial_message)

            st.write("AI Response:")
            st.write(response)

st.sidebar.title("About")
st.sidebar.info("This is a demo of an AI CyberSecurity Program Advisor using an open-source LLM and LangChain. It's designed to engage with leads and attempt to book video calls for sales meetings.")

# To run this script, use: streamlit run main.py
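
Note: main.py assumes a leads table already exists inside leads.db with at least name and email columns, since those are the only fields it reads. A minimal seeding sketch using the same pandas/SQLAlchemy stack is below; the file name seed_leads.py and the sample rows are illustrative assumptions, not part of this commit.

# seed_leads.py: hypothetical helper for creating the demo database
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///leads.db")

# main.py only reads the `name` and `email` columns; anything else is optional.
leads = pd.DataFrame([
    {"name": "Alice Example", "email": "alice@example.com"},
    {"name": "Bob Example", "email": "bob@example.com"},
])
leads.to_sql("leads", engine, if_exists="replace", index=False)
print(f"Seeded {len(leads)} leads into leads.db")

Run it once with python seed_leads.py before launching the Streamlit app. Judging from the imports, the environment also needs streamlit, torch, transformers, langchain, pandas and sqlalchemy installed.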