from streamlit_webrtc import webrtc_streamer, WebRtcMode
from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
from product_recommender import ProductRecommender
from objection_handler import ObjectionHandler
from google_sheets import fetch_call_data, store_data_in_sheet
from sentence_transformers import SentenceTransformer
from env_setup import config
import re
import uuid
import pandas as pd
import plotly.express as px
import streamlit as st
import numpy as np
from io import BytesIO
import wave
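
# Usage note (assumption: this script is the Streamlit entry point; the file name
# app.py below is illustrative, not part of the original code):
#   streamlit run app.py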

# Shared resources: the objection and recommendation handlers (each exposes a vector
# index) and the sentence-embedding model used to query them.
objection_handler = ObjectionHandler("objections.csv")
product_recommender = ProductRecommender("recommendations.csv")
model = SentenceTransformer('all-MiniLM-L6-v2')
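
# Note: in Streamlit, module-level statements re-run on every script rerun, so the
# model above may be reloaded repeatedly. A cached loader is one possible alternative
# (illustrative sketch; assumes a Streamlit version that provides st.cache_resource,
# and the helper name load_embedding_model is not part of the original code):
#
#   @st.cache_resource
#   def load_embedding_model():
#       return SentenceTransformer('all-MiniLM-L6-v2')
#
#   model = load_embedding_model()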


def generate_comprehensive_summary(chunks):
    """Generate a comprehensive summary from conversation chunks."""
    full_text = " ".join([chunk[0] for chunk in chunks])

    total_chunks = len(chunks)
    sentiments = [chunk[1] for chunk in chunks]

    # Keyword groups used to detect the main themes of the conversation.
    context_keywords = {
        'product_inquiry': ['dress', 'product', 'price', 'stock'],
        'pricing': ['cost', 'price', 'budget'],
        'negotiation': ['installment', 'payment', 'manage']
    }

    themes = []
    for keyword_type, keywords in context_keywords.items():
        if any(keyword.lower() in full_text.lower() for keyword in keywords):
            themes.append(keyword_type)

    positive_count = sentiments.count('POSITIVE')
    negative_count = sentiments.count('NEGATIVE')
    neutral_count = sentiments.count('NEUTRAL')

    # Collect the chunks that mention the most decision-relevant keywords.
    key_interactions = []
    for chunk in chunks:
        if any(keyword.lower() in chunk[0].lower() for keyword in ['price', 'dress', 'stock', 'installment']):
            key_interactions.append(chunk[0])

    summary = "Conversation Summary:\n"

    if 'product_inquiry' in themes:
        summary += "• Customer initiated a product inquiry about items.\n"

    if 'pricing' in themes:
        summary += "• Price and budget considerations were discussed.\n"

    if 'negotiation' in themes:
        summary += "• Customer and seller explored flexible payment options.\n"

    summary += "\nConversation Sentiment:\n"
    summary += f"• Positive Interactions: {positive_count}\n"
    summary += f"• Negative Interactions: {negative_count}\n"
    summary += f"• Neutral Interactions: {neutral_count}\n"

    summary += "\nKey Conversation Points:\n"
    for interaction in key_interactions[:3]:
        summary += f"• {interaction}\n"

    if positive_count > negative_count:
        summary += "\nOutcome: Constructive and potentially successful interaction."
    elif negative_count > positive_count:
        summary += "\nOutcome: Interaction may require further follow-up."
    else:
        summary += "\nOutcome: Neutral interaction with potential for future engagement."

    return summary


def is_valid_input(text):
    text = text.strip().lower()
    # Require at least 3 characters and only letters/whitespace.
    if len(text) < 3 or re.match(r'^[a-zA-Z\s]*$', text) is None:
        return False
    return True


def is_relevant_sentiment(sentiment_score):
    return sentiment_score > 0.4


def calculate_overall_sentiment(sentiment_scores):
    if sentiment_scores:
        average_sentiment = sum(sentiment_scores) / len(sentiment_scores)
        overall_sentiment = (
            "POSITIVE" if average_sentiment > 0 else
            "NEGATIVE" if average_sentiment < 0 else
            "NEUTRAL"
        )
    else:
        overall_sentiment = "NEUTRAL"
    return overall_sentiment
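
# Illustrative usage of the helpers above (hypothetical values, not part of the app flow):
#
#   chunks = [("Do you have this dress in stock?", "POSITIVE"),
#             ("The price is too high for me.", "NEGATIVE")]
#   print(generate_comprehensive_summary(chunks))    # themes, sentiment counts, key points
#   print(calculate_overall_sentiment([0.9, -0.6]))  # "POSITIVE" (average > 0)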


def handle_objection(text):
    query_embedding = model.encode([text])
    # Search the objection index for the single nearest entry; 1.5 is the distance
    # cutoff below which a match is considered close enough to answer.
    distances, indices = objection_handler.index.search(query_embedding, 1)
    if distances[0][0] < 1.5:
        responses = objection_handler.handle_objection(text)
        return "\n".join(responses) if responses else "No objection response found."
    return "No objection response found."


def transcribe_audio(audio_bytes, sample_rate=16000):
    """Transcribe audio using the transcribe_with_chunks function from sentiment_analysis.py."""
    try:
        # Wrap the raw PCM bytes in an in-memory WAV container (mono, 16-bit samples).
        with BytesIO() as wav_buffer:
            with wave.open(wav_buffer, 'wb') as wf:
                wf.setnchannels(1)
                wf.setsampwidth(2)
                wf.setframerate(sample_rate)
                wf.writeframes(audio_bytes)

        # Note: the WAV buffer above is currently not passed on; transcribe_with_chunks
        # is called with an empty dict and produces the chunk list on its own.
        chunks = transcribe_with_chunks({})
        if chunks:
            # Return the text of the most recent chunk.
            return chunks[-1][0]
    except Exception as e:
        print(f"Error transcribing audio: {e}")
    return None


def real_time_analysis():
    st.info("Listening... Say 'stop' to end the process.")

    def audio_frame_callback(audio_frame):
        # Raw samples from the incoming WebRTC audio frame.
        audio_data = audio_frame.to_ndarray()
        print(f"Audio data shape: {audio_data.shape}")
        print(f"Audio data sample: {audio_data[:10]}")

        # Convert to 16-bit PCM bytes (assumes float samples in the [-1, 1] range).
        audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()

        text = transcribe_audio(audio_bytes)
        if text:
            st.write(f"*Recognized Text:* {text}")

            sentiment, score = analyze_sentiment(text)
            st.write(f"*Sentiment:* {sentiment} (Score: {score})")

            objection_response = handle_objection(text)
            st.write(f"*Objection Response:* {objection_response}")

            recommendations = []
            if is_valid_input(text) and is_relevant_sentiment(score):
                query_embedding = model.encode([text])
                distances, indices = product_recommender.index.search(query_embedding, 1)

                if distances[0][0] < 1.5:
                    recommendations = product_recommender.get_recommendations(text)

                if recommendations:
                    st.write("*Product Recommendations:*")
                    for rec in recommendations:
                        st.write(rec)
        else:
            st.error("No transcription returned.")

        return audio_frame

    webrtc_ctx = webrtc_streamer(
        key="real-time-audio",
        mode=WebRtcMode.SENDONLY,
        audio_frame_callback=audio_frame_callback,
        media_stream_constraints={"audio": True, "video": False},
    )


def run_app():
    st.set_page_config(page_title="Sales Call Assistant", layout="wide")
    st.title("AI Sales Call Assistant")

    st.sidebar.title("Navigation")
    app_mode = st.sidebar.radio("Choose a mode:", ["Real-Time Call Analysis", "Dashboard"])

    if app_mode == "Real-Time Call Analysis":
        st.header("Real-Time Sales Call Analysis")
        real_time_analysis()

    elif app_mode == "Dashboard":
        st.header("Call Summaries and Sentiment Analysis")
        try:
            data = fetch_call_data(config["google_sheet_id"])
            if data.empty:
                st.warning("No data available in the Google Sheet.")
            else:
                sentiment_counts = data['Sentiment'].value_counts()

                col1, col2 = st.columns(2)
                with col1:
                    st.subheader("Sentiment Distribution")
                    fig_pie = px.pie(
                        values=sentiment_counts.values,
                        names=sentiment_counts.index,
                        title='Call Sentiment Breakdown',
                        color_discrete_map={
                            'POSITIVE': 'green',
                            'NEGATIVE': 'red',
                            'NEUTRAL': 'blue'
                        }
                    )
                    st.plotly_chart(fig_pie)

                with col2:
                    st.subheader("Sentiment Counts")
                    fig_bar = px.bar(
                        x=sentiment_counts.index,
                        y=sentiment_counts.values,
                        title='Number of Calls by Sentiment',
                        labels={'x': 'Sentiment', 'y': 'Number of Calls'},
                        color=sentiment_counts.index,
                        color_discrete_map={
                            'POSITIVE': 'green',
                            'NEGATIVE': 'red',
                            'NEUTRAL': 'blue'
                        }
                    )
                    st.plotly_chart(fig_bar)

                st.subheader("All Calls")
                display_data = data.copy()
                display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
                st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])

                unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
                call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)

                call_details = data[data['Call ID'] == call_id]
                if not call_details.empty:
                    st.subheader("Detailed Call Information")
                    st.write(f"**Call ID:** {call_id}")
                    st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")

                    st.subheader("Full Call Summary")
                    st.text_area("Summary:",
                                 value=call_details.iloc[0]['Summary'],
                                 height=200,
                                 disabled=True)

                    st.subheader("Conversation Chunks")
                    for _, row in call_details.iterrows():
                        if pd.notna(row['Chunk']):
                            st.write(f"**Chunk:** {row['Chunk']}")
                            st.write(f"**Sentiment:** {row['Sentiment']}")
                            st.write("---")
                else:
                    st.error("No details available for the selected Call ID.")
        except Exception as e:
            st.error(f"Error loading dashboard: {e}")


if __name__ == "__main__":
    run_app()