Zasha1 committed
Commit 6495c45 · verified · Parent(s): 95090b4

Update app.py

Files changed (1): app.py (+216 -144)

app.py CHANGED
@@ -1,4 +1,4 @@
-import speech_recognition as sr
+from streamlit_webrtc import webrtc_streamer, WebRtcMode
 from sentiment_analysis import analyze_sentiment
 from product_recommender import ProductRecommender
 from objection_handler import ObjectionHandler
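Note: this change moves audio capture from the server into the browser. speech_recognition and pyaudio open a microphone on the machine running Streamlit, which does not exist on a hosted Space (the error message about a "virtual microphone setup" deleted below hints at the problem); streamlit_webrtc instead receives the visitor's microphone over WebRTC. Assuming nothing else imports the old packages, the Space's requirements would change accordingly (a sketch, not part of this commit):

    # requirements.txt (assumed)
    streamlit-webrtc   # pulls in PyAV, which provides the av.AudioFrame objects
    # SpeechRecognition and PyAudio can be dropped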
@@ -10,66 +10,42 @@ import uuid
 import pandas as pd
 import plotly.express as px
 import streamlit as st
-import pyaudio
+import numpy as np
+from io import BytesIO
+import wave
 
 # Initialize components
 objection_handler = ObjectionHandler('objections.csv')
 product_recommender = ProductRecommender('recommendations.csv')
 model = SentenceTransformer('all-MiniLM-L6-v2')
 
-def list_audio_devices():
-    """List available audio input devices using pyaudio."""
-    p = pyaudio.PyAudio()
-    devices = []
-    for i in range(p.get_device_count()):
-        device_info = p.get_device_info_by_index(i)
-        if device_info["maxInputChannels"] > 0:  # Check if it's an input device
-            devices.append(device_info)
-    return devices
-
 def real_time_analysis():
     st.info("Listening... Say 'stop' to end the process.")
 
-    try:
-        # List available audio devices
-        devices = list_audio_devices()
-        st.write("Available audio devices:")
-        for device in devices:
-            st.write(f"Device {device['index']}: {device['name']} (Input Channels: {device['maxInputChannels']})")
-
-        if not devices:
-            st.error("No audio input devices found. Please check the virtual microphone setup.")
-            return
-
-        # Use the first available input device
-        device_index = devices[0]["index"]
-        st.write(f"Using device index {device_index}: {devices[0]['name']}")
-
-        recognizer = sr.Recognizer()
-        mic = sr.Microphone(device_index=device_index)
-
-        while True:
-            with mic as source:
-                st.write("Listening...")
-                recognizer.adjust_for_ambient_noise(source)
-                audio = recognizer.listen(source)
-
-            try:
-                st.write("Recognizing...")
-                text = recognizer.recognize_google(audio)
+    def audio_frame_callback(audio_frame):
+        # Convert audio frame to bytes
+        audio_bytes = audio_frame.to_ndarray().tobytes()
+
+        # Pack the audio bytes into an in-memory WAV buffer
+        with BytesIO() as wav_buffer:
+            with wave.open(wav_buffer, 'wb') as wav_file:
+                wav_file.setnchannels(1)      # Mono audio
+                wav_file.setsampwidth(2)      # 2 bytes for int16
+                wav_file.setframerate(16000)  # Sample rate
+                wav_file.writeframes(audio_bytes)
+
+            # Transcribe the audio
+            text = transcribe_audio(wav_buffer.getvalue())
+            if text:
                 st.write(f"*Recognized Text:* {text}")
 
-                if 'stop' in text.lower():
-                    st.write("Stopping real-time analysis...")
-                    break
-
-                # Append to the total conversation
-                total_text += text + " "
+                # Analyze sentiment
                 sentiment, score = analyze_sentiment(text)
-                sentiment_scores.append(score)
+                st.write(f"*Sentiment:* {sentiment} (Score: {score})")
 
                 # Handle objection
                 objection_response = handle_objection(text)
+                st.write(f"*Objection Response:* {objection_response}")
 
                 # Get product recommendation
                 recommendations = []
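Note: the callback packs the raw samples into a WAV header that claims 16 kHz mono int16, but streamlit_webrtc delivers av.AudioFrame objects whose native rate is typically 48 kHz and often stereo, so the bytes handed to transcribe_audio would be mislabeled. A minimal sketch that derives the header from the frame itself (assuming the frame carries packed int16 samples; frame_to_wav_bytes is a hypothetical helper, not part of this commit):

    import wave
    from io import BytesIO

    def frame_to_wav_bytes(audio_frame) -> bytes:
        # Use the frame's own channel count and sample rate instead of guessing.
        pcm = audio_frame.to_ndarray()  # int16 PCM (assumes an 's16' format frame)
        with BytesIO() as wav_buffer:
            with wave.open(wav_buffer, 'wb') as wav_file:
                wav_file.setnchannels(len(audio_frame.layout.channels))
                wav_file.setsampwidth(2)  # 2 bytes per int16 sample
                wav_file.setframerate(audio_frame.sample_rate)  # e.g. 48000
                wav_file.writeframes(pcm.tobytes())
            return wav_buffer.getvalue()

A single WebRTC frame is only a few milliseconds of audio, so in practice several seconds of frames would be buffered and concatenated before transcription.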
@@ -80,96 +56,34 @@ def real_time_analysis():
                 if distances[0][0] < 1.5:  # Similarity threshold
                     recommendations = product_recommender.get_recommendations(text)
 
-                transcribed_chunks.append((text, sentiment, score))
-
-                st.write(f"*Sentiment:* {sentiment} (Score: {score})")
-                st.write(f"*Objection Response:* {objection_response}")
-
                 if recommendations:
                     st.write("*Product Recommendations:*")
                     for rec in recommendations:
                         st.write(rec)
 
-            except sr.UnknownValueError:
-                st.error("Speech Recognition could not understand the audio.")
-            except sr.RequestError as e:
-                st.error(f"Error with the Speech Recognition service: {e}")
-            except Exception as e:
-                st.error(f"Error during processing: {e}")
-
-        # After conversation ends, calculate and display overall sentiment and summary
-        overall_sentiment = calculate_overall_sentiment(sentiment_scores)
-        call_summary = generate_comprehensive_summary(transcribed_chunks)
-
-        st.subheader("Conversation Summary:")
-        st.write(total_text.strip())
-        st.subheader("Overall Sentiment:")
-        st.write(overall_sentiment)
-
-        # Store data in Google Sheets
-        store_data_in_sheet(
-            config["google_sheet_id"],
-            transcribed_chunks,
-            call_summary,
-            overall_sentiment
-        )
-        st.success("Conversation data stored successfully in Google Sheets!")
-
-    except Exception as e:
-        st.error(f"Error in real-time analysis: {e}")
-
-
-def generate_comprehensive_summary(chunks):
-    """Generate a comprehensive summary from conversation chunks."""
-    full_text = " ".join([chunk[0] for chunk in chunks])
-    total_chunks = len(chunks)
-    sentiments = [chunk[1] for chunk in chunks]
-
-    context_keywords = {
-        'product_inquiry': ['dress', 'product', 'price', 'stock'],
-        'pricing': ['cost', 'price', 'budget'],
-        'negotiation': ['installment', 'payment', 'manage']
-    }
-
-    themes = []
-    for keyword_type, keywords in context_keywords.items():
-        if any(keyword.lower() in full_text.lower() for keyword in keywords):
-            themes.append(keyword_type)
-
-    positive_count = sentiments.count('POSITIVE')
-    negative_count = sentiments.count('NEGATIVE')
-    neutral_count = sentiments.count('NEUTRAL')
-
-    key_interactions = []
-    for chunk in chunks:
-        if any(keyword.lower() in chunk[0].lower() for keyword in ['price', 'dress', 'stock', 'installment']):
-            key_interactions.append(chunk[0])
-
-    summary = f"Conversation Summary:\n"
-    if 'product_inquiry' in themes:
-        summary += "• Customer initiated a product inquiry about items.\n"
-    if 'pricing' in themes:
-        summary += "• Price and budget considerations were discussed.\n"
-    if 'negotiation' in themes:
-        summary += "• Customer and seller explored flexible payment options.\n"
-
-    summary += f"\nConversation Sentiment:\n"
-    summary += f"• Positive Interactions: {positive_count}\n"
-    summary += f"• Negative Interactions: {negative_count}\n"
-    summary += f"• Neutral Interactions: {neutral_count}\n"
-
-    summary += "\nKey Conversation Points:\n"
-    for interaction in key_interactions[:3]:
-        summary += f"• {interaction}\n"
-
-    if positive_count > negative_count:
-        summary += "\nOutcome: Constructive and potentially successful interaction."
-    elif negative_count > positive_count:
-        summary += "\nOutcome: Interaction may require further follow-up."
-    else:
-        summary += "\nOutcome: Neutral interaction with potential for future engagement."
-
-    return summary
+        return audio_frame
+
+    # Start WebRTC audio stream
+    webrtc_ctx = webrtc_streamer(
+        key="real-time-audio",
+        mode=WebRtcMode.SENDONLY,
+        audio_frame_callback=audio_frame_callback,
+        media_stream_constraints={"audio": True, "video": False},
+    )
+
+def transcribe_audio(audio_bytes):
+    """Transcribe audio using a speech-to-text model or API."""
+    # Replace this with your actual speech-to-text implementation
+    # For now, we'll just return a dummy text
+    return "This is a placeholder transcription."
+
+def handle_objection(text):
+    query_embedding = model.encode([text])
+    distances, indices = objection_handler.index.search(query_embedding, 1)
+    if distances[0][0] < 1.5:  # Adjust similarity threshold as needed
+        responses = objection_handler.handle_objection(text)
+        return "\n".join(responses) if responses else "No objection response found."
+    return "No objection response found."
 
 def is_valid_input(text):
     text = text.strip().lower()
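Note: transcribe_audio is a stub, so every chunk "transcribes" to the same placeholder and the voice command promised by st.info ("Say 'stop' ...") can never trigger. One possible implementation reuses the SpeechRecognition package the previous revision already depended on (an assumption, not part of this commit; expects audio_bytes to be a complete WAV buffer):

    import speech_recognition as sr
    from io import BytesIO

    def transcribe_audio(audio_bytes):
        """Transcribe WAV bytes with the Google Web Speech API."""
        recognizer = sr.Recognizer()
        try:
            with sr.AudioFile(BytesIO(audio_bytes)) as source:
                audio = recognizer.record(source)  # read the entire buffer
            return recognizer.recognize_google(audio)
        except sr.UnknownValueError:
            return ""  # nothing intelligible in this chunk
        except sr.RequestError:
            return ""  # recognition service unreachable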
@@ -180,38 +94,87 @@ def is_valid_input(text):
 
 def is_relevant_sentiment(sentiment_score):
     return sentiment_score > 0.4
 
-def calculate_overall_sentiment(sentiment_scores):
-    if sentiment_scores:
-        average_sentiment = sum(sentiment_scores) / len(sentiment_scores)
-        overall_sentiment = (
-            "POSITIVE" if average_sentiment > 0 else
-            "NEGATIVE" if average_sentiment < 0 else
-            "NEUTRAL"
-        )
-    else:
-        overall_sentiment = "NEUTRAL"
-    return overall_sentiment
-
-def handle_objection(text):
-    query_embedding = model.encode([text])
-    distances, indices = objection_handler.index.search(query_embedding, 1)
-    if distances[0][0] < 1.5:  # Adjust similarity threshold as needed
-        responses = objection_handler.handle_objection(text)
-        return "\n".join(responses) if responses else "No objection response found."
-    return "No objection response found."
-
-def run_app():
-    st.set_page_config(page_title="Sales Call Assistant", layout="wide")
-    st.title("AI Sales Call Assistant")
-
-    st.sidebar.title("Navigation")
-    app_mode = st.sidebar.radio("Choose a mode:", ["Real-Time Call Analysis", "Dashboard"])
-
-    if app_mode == "Real-Time Call Analysis":
-        st.header("Real-Time Sales Call Analysis")
-        if st.button("Start Listening"):
-            real_time_analysis()
-
-    elif app_mode == "Dashboard":
-        st.header("Call Summaries and Sentiment Analysis")
+def run_app():
+    st.set_page_config(page_title="Sales Call Assistant", layout="wide")
+    st.title("AI Sales Call Assistant")
+
+    st.sidebar.title("Navigation")
+    app_mode = st.sidebar.radio("Choose a mode:", ["Real-Time Call Analysis", "Dashboard"])
+
+    if app_mode == "Real-Time Call Analysis":
+        st.header("Real-Time Sales Call Analysis")
+        real_time_analysis()
+
+    elif app_mode == "Dashboard":
+        st.header("Call Summaries and Sentiment Analysis")
+        try:
+            data = fetch_call_data(config["google_sheet_id"])
+            if data.empty:
+                st.warning("No data available in the Google Sheet.")
+            else:
+                sentiment_counts = data['Sentiment'].value_counts()
+
+                col1, col2 = st.columns(2)
+                with col1:
+                    st.subheader("Sentiment Distribution")
+                    fig_pie = px.pie(
+                        values=sentiment_counts.values,
+                        names=sentiment_counts.index,
+                        title='Call Sentiment Breakdown',
+                        color_discrete_map={
+                            'POSITIVE': 'green',
+                            'NEGATIVE': 'red',
+                            'NEUTRAL': 'blue'
+                        }
+                    )
+                    st.plotly_chart(fig_pie)
+
+                with col2:
+                    st.subheader("Sentiment Counts")
+                    fig_bar = px.bar(
+                        x=sentiment_counts.index,
+                        y=sentiment_counts.values,
+                        title='Number of Calls by Sentiment',
+                        labels={'x': 'Sentiment', 'y': 'Number of Calls'},
+                        color=sentiment_counts.index,
+                        color_discrete_map={
+                            'POSITIVE': 'green',
+                            'NEGATIVE': 'red',
+                            'NEUTRAL': 'blue'
+                        }
+                    )
+                    st.plotly_chart(fig_bar)
+
+                st.subheader("All Calls")
+                display_data = data.copy()
+                display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
+                st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
+
+                unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
+                call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
+
+                call_details = data[data['Call ID'] == call_id]
+                if not call_details.empty:
+                    st.subheader("Detailed Call Information")
+                    st.write(f"**Call ID:** {call_id}")
+                    st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
+
+                    st.subheader("Full Call Summary")
+                    st.text_area("Summary:",
+                                 value=call_details.iloc[0]['Summary'],
+                                 height=200,
+                                 disabled=True)
+
+                    st.subheader("Conversation Chunks")
+                    for _, row in call_details.iterrows():
+                        if pd.notna(row['Chunk']):
+                            st.write(f"**Chunk:** {row['Chunk']}")
+                            st.write(f"**Sentiment:** {row['Sentiment']}")
+                            st.write("---")
+                else:
+                    st.error("No details available for the selected Call ID.")
+        except Exception as e:
+            st.error(f"Error loading dashboard: {e}")
+
+if __name__ == "__main__":
+    run_app()
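Note: streamlit-webrtc invokes audio_frame_callback on a worker thread, where Streamlit calls such as st.write do not render to the page, so the per-chunk output above would be silently lost. The usual pattern is to hand results to the main script through a queue and poll while the stream is playing (a sketch under that assumption; frame_to_wav_bytes is the hypothetical helper from the earlier note):

    import queue

    import streamlit as st
    from streamlit_webrtc import webrtc_streamer, WebRtcMode

    text_queue: "queue.Queue[str]" = queue.Queue()

    def audio_frame_callback(audio_frame):
        text = transcribe_audio(frame_to_wav_bytes(audio_frame))
        if text:
            text_queue.put(text)  # defer all UI work to the main thread
        return audio_frame

    webrtc_ctx = webrtc_streamer(
        key="real-time-audio",
        mode=WebRtcMode.SENDONLY,
        audio_frame_callback=audio_frame_callback,
        media_stream_constraints={"audio": True, "video": False},
    )

    while webrtc_ctx.state.playing:  # main script thread: st.* is safe here
        try:
            st.write(f"*Recognized Text:* {text_queue.get(timeout=1.0)}")
        except queue.Empty:
            continue

The sentiment, objection, and recommendation display would move into this loop for the same reason.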
 