Zasha1 committed on
Commit
ef79036
·
verified ·
1 Parent(s): 2a778a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +112 -99
app.py CHANGED
@@ -12,6 +12,7 @@ import plotly.express as px
12
  import streamlit as st
13
  import numpy as np
14
  import queue
 
15
 
16
  # Initialize components
17
  objection_handler = ObjectionHandler("objections.csv") # Use relative path
@@ -53,7 +54,11 @@ class AudioProcessor(AudioProcessorBase):
53
  def recv(self, frame):
54
  audio_data = frame.to_ndarray()
55
  audio_bytes = (audio_data * 32767).astype(np.int16).tobytes() # Convert to int16 format
56
-
 
 
 
 
57
  # Transcribe the audio
58
  text = self.transcribe_audio(audio_bytes)
59
  if text:
@@ -82,32 +87,116 @@ def real_time_analysis():
82
  media_stream_constraints={"audio": True, "video": False},
83
  )
84
 
85
- # Display transcribed text from the queue
86
- while not transcription_queue.empty():
87
- text = transcription_queue.get()
88
- st.write(f"*Recognized Text:* {text}")
 
 
 
 
 
89
 
90
- # Analyze sentiment
91
- sentiment, score = analyze_sentiment(text)
92
- st.write(f"*Sentiment:* {sentiment} (Score: {score})")
93
 
94
- # Handle objection
95
- objection_response = handle_objection(text)
96
- st.write(f"*Objection Response:* {objection_response}")
 
 
97
 
98
- # Get product recommendation
99
- recommendations = []
100
- if is_valid_input(text) and is_relevant_sentiment(score):
101
- query_embedding = model.encode([text])
102
- distances, indices = product_recommender.index.search(query_embedding, 1)
103
 
104
- if distances[0][0] < 1.5: # Similarity threshold
105
- recommendations = product_recommender.get_recommendations(text)
 
 
106
 
107
- if recommendations:
108
- st.write("*Product Recommendations:*")
109
- for rec in recommendations:
110
- st.write(rec)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
 
112
  def run_app():
113
  st.set_page_config(page_title="Sales Call Assistant", layout="wide")
@@ -121,83 +210,7 @@ def run_app():
121
  real_time_analysis()
122
 
123
  elif app_mode == "Dashboard":
124
- st.header("Call Summaries and Sentiment Analysis")
125
- try:
126
- data = fetch_call_data(config["google_sheet_id"])
127
- if data.empty:
128
- st.warning("No data available in the Google Sheet.")
129
- else:
130
- # Sentiment Visualizations
131
- sentiment_counts = data['Sentiment'].value_counts()
132
-
133
- # Pie Chart
134
- col1, col2 = st.columns(2)
135
- with col1:
136
- st.subheader("Sentiment Distribution")
137
- fig_pie = px.pie(
138
- values=sentiment_counts.values,
139
- names=sentiment_counts.index,
140
- title='Call Sentiment Breakdown',
141
- color_discrete_map={
142
- 'POSITIVE': 'green',
143
- 'NEGATIVE': 'red',
144
- 'NEUTRAL': 'blue'
145
- }
146
- )
147
- st.plotly_chart(fig_pie)
148
-
149
- # Bar Chart
150
- with col2:
151
- st.subheader("Sentiment Counts")
152
- fig_bar = px.bar(
153
- x=sentiment_counts.index,
154
- y=sentiment_counts.values,
155
- title='Number of Calls by Sentiment',
156
- labels={'x': 'Sentiment', 'y': 'Number of Calls'},
157
- color=sentiment_counts.index,
158
- color_discrete_map={
159
- 'POSITIVE': 'green',
160
- 'NEGATIVE': 'red',
161
- 'NEUTRAL': 'blue'
162
- }
163
- )
164
- st.plotly_chart(fig_bar)
165
-
166
- # Existing Call Details Section
167
- st.subheader("All Calls")
168
- display_data = data.copy()
169
- display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
170
- st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
171
-
172
- # Dropdown to select Call ID
173
- unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
174
- call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
175
-
176
- # Display selected Call ID details
177
- call_details = data[data['Call ID'] == call_id]
178
- if not call_details.empty:
179
- st.subheader("Detailed Call Information")
180
- st.write(f"**Call ID:** {call_id}")
181
- st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
182
-
183
- # Expand summary section
184
- st.subheader("Full Call Summary")
185
- st.text_area("Summary:",
186
- value=call_details.iloc[0]['Summary'],
187
- height=200,
188
- disabled=True)
189
-
190
- # Show all chunks for the selected call
191
- st.subheader("Conversation Chunks")
192
- for _, row in call_details.iterrows():
193
- if pd.notna(row['Chunk']):
194
- st.write(f"**Chunk:** {row['Chunk']}")
195
- st.write(f"**Sentiment:** {row['Sentiment']}")
196
- st.write("---") # Separator between chunks
197
- else:
198
- st.error("No details available for the selected Call ID.")
199
- except Exception as e:
200
- st.error(f"Error loading dashboard: {e}")
201
 
202
  if __name__ == "__main__":
203
  run_app()
 
12
  import streamlit as st
13
  import numpy as np
14
  import queue
15
+ import threading
16
 
17
  # Initialize components
18
  objection_handler = ObjectionHandler("objections.csv") # Use relative path
 
54
  def recv(self, frame):
55
  audio_data = frame.to_ndarray()
56
  audio_bytes = (audio_data * 32767).astype(np.int16).tobytes() # Convert to int16 format
57
+
58
+ # Debugging: Check audio data
59
+ print(f"Audio data shape: {audio_data.shape}")
60
+ print(f"Audio data sample: {audio_data[:10]}")
61
+
62
  # Transcribe the audio
63
  text = self.transcribe_audio(audio_bytes)
64
  if text:
 
87
  media_stream_constraints={"audio": True, "video": False},
88
  )
89
 
90
+ if webrtc_ctx.state.playing:
91
+ # Display transcribed text from the queue
92
+ while not transcription_queue.empty():
93
+ text = transcription_queue.get()
94
+ st.write(f"*Recognized Text:* {text}")
95
+
96
+ # Analyze sentiment
97
+ sentiment, score = analyze_sentiment(text)
98
+ st.write(f"*Sentiment:* {sentiment} (Score: {score})")
99
 
100
+ # Handle objection
101
+ objection_response = handle_objection(text)
102
+ st.write(f"*Objection Response:* {objection_response}")
103
 
104
+ # Get product recommendation
105
+ recommendations = []
106
+ if is_valid_input(text) and is_relevant_sentiment(score):
107
+ query_embedding = model.encode([text])
108
+ distances, indices = product_recommender.index.search(query_embedding, 1)
109
 
110
+ if distances[0][0] < 1.5: # Similarity threshold
111
+ recommendations = product_recommender.get_recommendations(text)
 
 
 
112
 
113
+ if recommendations:
114
+ st.write("*Product Recommendations:*")
115
+ for rec in recommendations:
116
+ st.write(rec)
117
 
118
+ def fetch_data_and_display():
119
+ try:
120
+ st.header("Call Summaries and Sentiment Analysis")
121
+ data = fetch_call_data(config["google_sheet_id"])
122
+
123
+ # Debugging: Log fetched data
124
+ print(f"Fetched data: {data}")
125
+
126
+ if data.empty:
127
+ st.warning("No data available in the Google Sheet.")
128
+ else:
129
+ # Sentiment Visualizations
130
+ sentiment_counts = data['Sentiment'].value_counts()
131
+
132
+ # Pie Chart
133
+ col1, col2 = st.columns(2)
134
+ with col1:
135
+ st.subheader("Sentiment Distribution")
136
+ fig_pie = px.pie(
137
+ values=sentiment_counts.values,
138
+ names=sentiment_counts.index,
139
+ title='Call Sentiment Breakdown',
140
+ color_discrete_map={
141
+ 'POSITIVE': 'green',
142
+ 'NEGATIVE': 'red',
143
+ 'NEUTRAL': 'blue'
144
+ }
145
+ )
146
+ st.plotly_chart(fig_pie)
147
+
148
+ # Bar Chart
149
+ with col2:
150
+ st.subheader("Sentiment Counts")
151
+ fig_bar = px.bar(
152
+ x=sentiment_counts.index,
153
+ y=sentiment_counts.values,
154
+ title='Number of Calls by Sentiment',
155
+ labels={'x': 'Sentiment', 'y': 'Number of Calls'},
156
+ color=sentiment_counts.index,
157
+ color_discrete_map={
158
+ 'POSITIVE': 'green',
159
+ 'NEGATIVE': 'red',
160
+ 'NEUTRAL': 'blue'
161
+ }
162
+ )
163
+ st.plotly_chart(fig_bar)
164
+
165
+ # Existing Call Details Section
166
+ st.subheader("All Calls")
167
+ display_data = data.copy()
168
+ display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
169
+ st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
170
+
171
+ # Dropdown to select Call ID
172
+ unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
173
+ call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
174
+
175
+ # Display selected Call ID details
176
+ call_details = data[data['Call ID'] == call_id]
177
+ if not call_details.empty:
178
+ st.subheader("Detailed Call Information")
179
+ st.write(f"**Call ID:** {call_id}")
180
+ st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
181
+
182
+ # Expand summary section
183
+ st.subheader("Full Call Summary")
184
+ st.text_area("Summary:",
185
+ value=call_details.iloc[0]['Summary'],
186
+ height=200,
187
+ disabled=True)
188
+
189
+ # Show all chunks for the selected call
190
+ st.subheader("Conversation Chunks")
191
+ for _, row in call_details.iterrows():
192
+ if pd.notna(row['Chunk']):
193
+ st.write(f"**Chunk:** {row['Chunk']}")
194
+ st.write(f"**Sentiment:** {row['Sentiment']}")
195
+ st.write("---") # Separator between chunks
196
+ else:
197
+ st.error("No details available for the selected Call ID.")
198
+ except Exception as e:
199
+ st.error(f"Error loading dashboard: {e}")
200
 
201
  def run_app():
202
  st.set_page_config(page_title="Sales Call Assistant", layout="wide")
 
210
  real_time_analysis()
211
 
212
  elif app_mode == "Dashboard":
213
+ fetch_data_and_display()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
 
215
  if __name__ == "__main__":
216
  run_app()