from streamlit_webrtc import webrtc_streamer, WebRtcMode, AudioProcessorBase
from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
from product_recommender import ProductRecommender
from objection_handler import ObjectionHandler
from google_sheets import fetch_call_data, store_data_in_sheet
from sentence_transformers import SentenceTransformer
from env_setup import config
import re
import uuid
import pandas as pd
import plotly.express as px
import streamlit as st
import numpy as np
import queue
import threading
# Initialize components
objection_handler = ObjectionHandler("objections.csv")
product_recommender = ProductRecommender("recommendations.csv")
model = SentenceTransformer('all-MiniLM-L6-v2')
# Queue to hold transcribed text
transcription_queue = queue.Queue()
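# Note: streamlit-webrtc runs the audio processor on its own worker thread, so
# this queue is the hand-off point between that thread and the Streamlit
# script thread that renders the results.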

def generate_comprehensive_summary(chunks):
    # Your existing function implementation (not called directly in this module)
    pass

def is_valid_input(text):
    # Minimal placeholder for your existing validation logic:
    # accept text that contains at least one word character.
    return bool(text and re.search(r"\w", text))

def is_relevant_sentiment(sentiment_score):
    # Minimal placeholder for your existing relevance check:
    # treat confidence scores above 0.4 as relevant.
    return sentiment_score is not None and sentiment_score > 0.4

def calculate_overall_sentiment(sentiment_scores):
    # Your existing function implementation (not called directly in this module)
    pass

def handle_objection(text):
    query_embedding = model.encode([text])
    distances, indices = objection_handler.index.search(query_embedding, 1)
    if distances[0][0] < 1.5:  # Only answer when the nearest objection is a close match
        responses = objection_handler.handle_objection(text)
        return "\n".join(responses) if responses else "No objection response found."
    return "No objection response found."

class AudioProcessor(AudioProcessorBase):
    def __init__(self):
        self.sr = 16000  # Sample rate
        self.q = transcription_queue

    def recv(self, frame):
        audio_data = frame.to_ndarray()
        audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()  # Convert to int16 format
        print(f"Audio data shape: {audio_data.shape}")
        print(f"Audio data sample: {audio_data[:10]}")
        text = self.transcribe_audio(audio_bytes)
        if text:
            self.q.put(text)
        return frame

    def transcribe_audio(self, audio_bytes):
        try:
            # transcribe_with_chunks maintains its own chunking state; the raw
            # audio_bytes passed in are not forwarded to it here.
            chunks = transcribe_with_chunks({})
            if chunks:
                return chunks[-1][0]  # Text of the most recent chunk
        except Exception as e:
            print(f"Error transcribing audio: {e}")
        return None

def real_time_analysis():
    st.info("Listening... Say 'stop' to end the process.")
    webrtc_ctx = webrtc_streamer(
        key="real-time-audio",
        mode=WebRtcMode.SENDONLY,
        audio_processor_factory=AudioProcessor,
        media_stream_constraints={"audio": True, "video": False},
    )
    if webrtc_ctx.state.playing:
        while not transcription_queue.empty():
            text = transcription_queue.get()
            st.write(f"*Recognized Text:* {text}")

            sentiment, score = analyze_sentiment(text)
            st.write(f"*Sentiment:* {sentiment} (Score: {score})")

            objection_response = handle_objection(text)
            st.write(f"*Objection Response:* {objection_response}")

            recommendations = []
            if is_valid_input(text) and is_relevant_sentiment(score):
                query_embedding = model.encode([text])
                distances, indices = product_recommender.index.search(query_embedding, 1)
                if distances[0][0] < 1.5:
                    recommendations = product_recommender.get_recommendations(text)

            if recommendations:
                st.write("*Product Recommendations:*")
                for rec in recommendations:
                    st.write(rec)

def fetch_data_and_display():
    try:
        st.header("Call Summaries and Sentiment Analysis")
        data = fetch_call_data(config["google_sheet_id"])
        print(f"Fetched data: {data}")  # Log fetched data
        if data.empty:
            st.warning("No data available in the Google Sheet.")
        else:
            sentiment_counts = data['Sentiment'].value_counts()

            col1, col2 = st.columns(2)
            with col1:
                st.subheader("Sentiment Distribution")
                fig_pie = px.pie(
                    values=sentiment_counts.values,
                    names=sentiment_counts.index,
                    title='Call Sentiment Breakdown',
                    color_discrete_map={
                        'POSITIVE': 'green',
                        'NEGATIVE': 'red',
                        'NEUTRAL': 'blue'
                    }
                )
                st.plotly_chart(fig_pie)

            with col2:
                st.subheader("Sentiment Counts")
                fig_bar = px.bar(
                    x=sentiment_counts.index,
                    y=sentiment_counts.values,
                    title='Number of Calls by Sentiment',
                    labels={'x': 'Sentiment', 'y': 'Number of Calls'},
                    color=sentiment_counts.index,
                    color_discrete_map={
                        'POSITIVE': 'green',
                        'NEGATIVE': 'red',
                        'NEUTRAL': 'blue'
                    }
                )
                st.plotly_chart(fig_bar)

            st.subheader("All Calls")
            display_data = data.copy()
            display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
            st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])

            unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
            call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
            call_details = data[data['Call ID'] == call_id]
            if not call_details.empty:
                st.subheader("Detailed Call Information")
                st.write(f"**Call ID:** {call_id}")
                st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")

                st.subheader("Full Call Summary")
                st.text_area("Summary:",
                             value=call_details.iloc[0]['Summary'],
                             height=200,
                             disabled=True)

                st.subheader("Conversation Chunks")
                for _, row in call_details.iterrows():
                    if pd.notna(row['Chunk']):
                        st.write(f"**Chunk:** {row['Chunk']}")
                        st.write(f"**Sentiment:** {row['Sentiment']}")
                        st.write("---")
            else:
                st.error("No details available for the selected Call ID.")
    except Exception as e:
        st.error(f"Error loading dashboard: {e}")

def run_app():
    st.set_page_config(page_title="Sales Call Assistant", layout="wide")
    st.title("AI Sales Call Assistant")
    st.warning("The space is currently not working due to issues with real-time transcription and data fetching from Google Sheets. We are working on resolving these issues.")
    st.markdown("""
    <style>
    /* Header Container Styling */
    .header-container {
        background: linear-gradient(135deg, #F8F9FA 0%, #E9ECEF 100%);
        padding: 20px;
        border-radius: 15px;
        margin-bottom: 30px;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    }

    /* Section Container Styling */
    .section {
        background: linear-gradient(135deg, #FFFFFF 0%, #F8F9FA 100%);
        padding: 25px;
        border-radius: 15px;
        margin-bottom: 30px;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    }

    /* Header Text Styling */
    .header {
        font-size: 2.5em;
        font-weight: 800;
        text-align: center;
        background: linear-gradient(120deg, #0D6EFD 0%, #0B5ED7 100%);
        -webkit-background-clip: text;
        -webkit-text-fill-color: transparent;
        margin: 0;
        padding: 10px;
        letter-spacing: 1px;
    }

    /* Subheader Styling */
    .subheader {
        font-size: 1.8em;
        font-weight: 600;
        background: linear-gradient(120deg, #0D6EFD 0%, #0B5ED7 100%);
        -webkit-background-clip: text;
        -webkit-text-fill-color: transparent;
        margin-top: 20px;
        margin-bottom: 10px;
        text-align: left;
    }

    /* Table Container Styling */
    .table-container {
        background: linear-gradient(135deg, #FFFFFF 0%, #F8F9FA 100%);
        padding: 20px;
        border-radius: 10px;
        margin: 20px 0;
        box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
    }

    /* Dark mode adjustments */
    @media (prefers-color-scheme: dark) {
        .header-container {
            background: linear-gradient(135deg, #212529 0%, #343A40 100%);
        }
        .section {
            background: linear-gradient(135deg, #212529 0%, #2B3035 100%);
        }
        .table-container {
            background: linear-gradient(135deg, #212529 0%, #2B3035 100%);
        }
        .header {
            background: linear-gradient(120deg, #6EA8FE 0%, #9EC5FE 100%);
            -webkit-background-clip: text;
            -webkit-text-fill-color: transparent;
        }
        .subheader {
            background: linear-gradient(120deg, #6EA8FE 0%, #9EC5FE 100%);
            -webkit-background-clip: text;
            -webkit-text-fill-color: transparent;
        }
    }

    /* Button Styling */
    .stButton > button {
        background: linear-gradient(135deg, #2196F3 0%, #1976D2 100%);
        color: white;
        border: none;
        padding: 10px 20px;
        border-radius: 5px;
        transition: all 0.3s ease;
    }
    .stButton > button:hover {
        background: linear-gradient(135deg, #1976D2 0%, #1565C0 100%);
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.2);
    }

    /* Tab Styling */
    .stTabs [data-baseweb="tab-list"] {
        gap: 24px;
        background: linear-gradient(135deg, #F8F9FA 0%, #E9ECEF 100%);
        padding: 10px;
        border-radius: 10px;
    }
    .stTabs [data-baseweb="tab"] {
        background-color: transparent;
        border-radius: 4px;
        color: #1976D2;
        font-weight: 600;
        padding: 10px 16px;
    }
    .stTabs [aria-selected="true"] {
        background: linear-gradient(120deg, #2196F3 0%, #1976D2 100%);
        color: white;
    }

    /* Dark mode tab adjustments */
    @media (prefers-color-scheme: dark) {
        .stTabs [data-baseweb="tab-list"] {
            background: linear-gradient(135deg, #212529 0%, #343A40 100%);
        }
        .stTabs [data-baseweb="tab"] {
            color: #82B1FF;
        }
        .stTabs [aria-selected="true"] {
            background: linear-gradient(120deg, #448AFF 0%, #2979FF 100%);
        }
    }

    /* Message Styling */
    .success {
        background: linear-gradient(135deg, #43A047 0%, #2E7D32 100%);
        color: white;
        padding: 10px;
        border-radius: 5px;
        margin: 10px 0;
    }
    .error {
        background: linear-gradient(135deg, #E53935 0%, #C62828 100%);
        color: white;
        padding: 10px;
        border-radius: 5px;
        margin: 10px 0;
    }
    .warning {
        background: linear-gradient(135deg, #FB8C00 0%, #F57C00 100%);
        color: white;
        padding: 10px;
        border-radius: 5px;
        margin: 10px 0;
    }
    </style>
""", unsafe_allow_html=True)
st.sidebar.title("Navigation")
app_mode = st.sidebar.radio("Choose a mode:", ["Real-Time Call Analysis", "Dashboard"])
if app_mode == "Real-Time Call Analysis":
st.header("Real-Time Sales Call Analysis")
if st.button("Start Listening"):
real_time_analysis()
elif app_mode == "Dashboard":
st.header("Call Summaries and Sentiment Analysis")
try:
data = fetch_call_data(config["google_sheet_id"])
if data.empty:
st.warning("No data available in the Google Sheet.")
else:
sentiment_counts = data['Sentiment'].value_counts()
product_mentions = filter_product_mentions(data[['Chunk']].values.tolist(), product_titles)
product_mentions_df = pd.DataFrame(list(product_mentions.items()), columns=['Product', 'Count'])
col1, col2 = st.columns(2)
with col1:
st.subheader("Sentiment Distribution")
fig_bar = px.bar(
x=sentiment_counts.index,
y=sentiment_counts.values,
title='Number of Calls by Sentiment',
labels={'x': 'Sentiment', 'y': 'Number of Calls'},
color=sentiment_counts.index,
color_discrete_map={
'POSITIVE': 'green',
'NEGATIVE': 'red',
'NEUTRAL': 'blue'
}
)
st.plotly_chart(fig_bar)
with col2:
st.subheader("Most Mentioned Products")
fig_products = px.pie(
values=product_mentions_df['Count'],
names=product_mentions_df['Product'],
title='Most Mentioned Products'
)
st.plotly_chart(fig_products)
st.subheader("All Calls")
display_data = data.copy()
display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
call_details = data[data['Call ID'] == call_id]
if not call_details.empty:
st.subheader("Detailed Call Information")
st.write(f"**Call ID:** {call_id}")
st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
st.subheader("Full Call Summary")
st.text_area("Summary:",
value=call_details.iloc[0]['Summary'],
height=200,
disabled=True)
else:
st.error("No details available for the selected Call ID.")
except Exception as e:
st.error(f"Error loading dashboard: {e}")
if __name__ == "__main__":
run_app() |