Update app.py
app.py
CHANGED
@@ -1,3 +1,5 @@
+import numpy as np
+import sounddevice as sd
 import speech_recognition as sr
 from sentiment_analysis import analyze_sentiment
 from product_recommender import ProductRecommender
@@ -19,6 +21,110 @@ objection_handler = ObjectionHandler('objections.csv')
 product_recommender = ProductRecommender('recommendations.csv')
 model = SentenceTransformer('all-MiniLM-L6-v2')
 
+def record_audio(duration=5, sample_rate=16000):
+    """
+    Record audio using sounddevice and return as NumPy array
+    """
+    st.write("Recording...")
+    audio = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=1, dtype='float32')
+    sd.wait()
+    st.write("Recording finished.")
+    return audio.flatten(), sample_rate
+
+def numpy_to_audio_data(audio_data):
+    """
+    Convert NumPy array to AudioData for speech_recognition
+    """
+    # Convert float32 to int16
+    int_audio = (audio_data * 32767).astype(np.int16)
+
+    # Create AudioData object
+    recognizer = sr.Recognizer()
+    audio_data = sr.AudioData(
+        int_audio.tobytes(),
+        sample_rate=16000,
+        sample_width=int_audio.dtype.itemsize
+    )
+    return audio_data
+
+def real_time_analysis():
+    recognizer = sr.Recognizer()
+    sentiment_scores = []
+    transcribed_chunks = []
+    total_text = ""
+
+    try:
+        while True:
+            # Record audio using sounddevice
+            audio_array, sample_rate = record_audio()
+
+            # Convert NumPy array to AudioData
+            audio_data = numpy_to_audio_data(audio_array)
+
+            try:
+                # Transcribe using speech_recognition
+                text = recognizer.recognize_google(audio_data)
+                st.write(f"*Recognized Text:* {text}")
+
+                if 'stop' in text.lower():
+                    st.write("Stopping real-time analysis...")
+                    break
+
+                # Append to the total conversation
+                total_text += text + " "
+                sentiment, score = analyze_sentiment(text)
+                sentiment_scores.append(score)
+
+                # Handle objection
+                objection_response = handle_objection(text)
+
+                # Get product recommendation
+                recommendations = []
+                if is_valid_input(text) and is_relevant_sentiment(score):
+                    query_embedding = model.encode([text])
+                    distances, indices = product_recommender.index.search(query_embedding, 1)
+
+                    if distances[0][0] < 1.5:  # Similarity threshold
+                        recommendations = product_recommender.get_recommendations(text)
+
+                transcribed_chunks.append((text, sentiment, score))
+
+                st.write(f"*Sentiment:* {sentiment} (Score: {score})")
+                st.write(f"*Objection Response:* {objection_response}")
+
+                if recommendations:
+                    st.write("*Product Recommendations:*")
+                    for rec in recommendations:
+                        st.write(rec)
+
+            except sr.UnknownValueError:
+                st.error("Speech Recognition could not understand the audio.")
+            except sr.RequestError as e:
+                st.error(f"Error with the Speech Recognition service: {e}")
+            except Exception as e:
+                st.error(f"Error during processing: {e}")
+
+        # After conversation ends, calculate and display overall sentiment and summary
+        overall_sentiment = calculate_overall_sentiment(sentiment_scores)
+        call_summary = generate_comprehensive_summary(transcribed_chunks)
+
+        st.subheader("Conversation Summary:")
+        st.write(total_text.strip())
+        st.subheader("Overall Sentiment:")
+        st.write(overall_sentiment)
+
+        # Store data in Google Sheets
+        store_data_in_sheet(
+            config["google_sheet_id"],
+            transcribed_chunks,
+            call_summary,
+            overall_sentiment
+        )
+        st.success("Conversation data stored successfully in Google Sheets!")
+
+    except Exception as e:
+        st.error(f"Error in real-time analysis: {e}")
+
 def generate_comprehensive_summary(chunks):
     """
     Generate a comprehensive summary from conversation chunks
@@ -88,6 +194,7 @@ def generate_comprehensive_summary(chunks):
 
     return summary
 
+
 def is_valid_input(text):
     text = text.strip().lower()
     if len(text) < 3 or re.match(r'^[a-zA-Z\s]*$', text) is None:
@@ -109,87 +216,6 @@ def calculate_overall_sentiment(sentiment_scores):
         overall_sentiment = "NEUTRAL"
     return overall_sentiment
 
-def real_time_analysis():
-    recognizer = sr.Recognizer()
-    mic = sr.Microphone()
-
-    st.info("Say 'stop' to end the process.")
-
-    sentiment_scores = []
-    transcribed_chunks = []
-    total_text = ""
-
-    try:
-        while True:
-            with mic as source:
-                st.write("Listening...")
-                recognizer.adjust_for_ambient_noise(source)
-                audio = recognizer.listen(source)
-
-            try:
-                st.write("Recognizing...")
-                text = recognizer.recognize_google(audio)
-                st.write(f"*Recognized Text:* {text}")
-
-                if 'stop' in text.lower():
-                    st.write("Stopping real-time analysis...")
-                    break
-
-                # Append to the total conversation
-                total_text += text + " "
-                sentiment, score = analyze_sentiment(text)
-                sentiment_scores.append(score)
-
-                # Handle objection
-                objection_response = handle_objection(text)
-
-                # Get product recommendation
-                recommendations = []
-                if is_valid_input(text) and is_relevant_sentiment(score):
-                    query_embedding = model.encode([text])
-                    distances, indices = product_recommender.index.search(query_embedding, 1)
-
-                    if distances[0][0] < 1.5:  # Similarity threshold
-                        recommendations = product_recommender.get_recommendations(text)
-
-                transcribed_chunks.append((text, sentiment, score))
-
-                st.write(f"*Sentiment:* {sentiment} (Score: {score})")
-                st.write(f"*Objection Response:* {objection_response}")
-
-                if recommendations:
-                    st.write("*Product Recommendations:*")
-                    for rec in recommendations:
-                        st.write(rec)
-
-            except sr.UnknownValueError:
-                st.error("Speech Recognition could not understand the audio.")
-            except sr.RequestError as e:
-                st.error(f"Error with the Speech Recognition service: {e}")
-            except Exception as e:
-                st.error(f"Error during processing: {e}")
-
-        # After conversation ends, calculate and display overall sentiment and summary
-        overall_sentiment = calculate_overall_sentiment(sentiment_scores)
-        call_summary = generate_comprehensive_summary(transcribed_chunks)
-
-        st.subheader("Conversation Summary:")
-        st.write(total_text.strip())
-        st.subheader("Overall Sentiment:")
-        st.write(overall_sentiment)
-
-        # Store data in Google Sheets
-        store_data_in_sheet(
-            config["google_sheet_id"],
-            transcribed_chunks,
-            call_summary,
-            overall_sentiment
-        )
-        st.success("Conversation data stored successfully in Google Sheets!")
-
-    except Exception as e:
-        st.error(f"Error in real-time analysis: {e}")
-
 def handle_objection(text):
     query_embedding = model.encode([text])
     distances, indices = objection_handler.index.search(query_embedding, 1)
@@ -198,7 +224,6 @@ def handle_objection(text):
         return "\n".join(responses) if responses else "No objection response found."
     return "No objection response found."
 
-# (Previous imports remain the same)
 
 def run_app():
     st.set_page_config(page_title="Sales Call Assistant", layout="wide")
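The new capture path records a fixed-length clip with sounddevice and hand-builds an sr.AudioData object instead of opening sr.Microphone. Below is a minimal standalone sketch of that path outside Streamlit; it assumes only the packages the diff already imports (numpy, sounddevice, SpeechRecognition), a working microphone, and network access for recognize_google. The capture_and_transcribe helper is illustrative, not part of app.py.

import numpy as np
import sounddevice as sd
import speech_recognition as sr

def capture_and_transcribe(duration=5, sample_rate=16000):
    # Record `duration` seconds of mono float32 audio, blocking until done.
    audio = sd.rec(int(duration * sample_rate), samplerate=sample_rate,
                   channels=1, dtype='float32')
    sd.wait()

    # Scale float32 samples in [-1.0, 1.0] to 16-bit PCM, the raw frame
    # format speech_recognition expects.
    int_audio = (audio.flatten() * 32767).astype(np.int16)
    audio_data = sr.AudioData(int_audio.tobytes(),
                              sample_rate=sample_rate,
                              sample_width=int_audio.dtype.itemsize)

    # Send the PCM data to Google's free web API for transcription.
    recognizer = sr.Recognizer()
    return recognizer.recognize_google(audio_data)

if __name__ == "__main__":
    print(capture_and_transcribe())

The recommendation gate (distances[0][0] < 1.5) is an L2-distance threshold on a nearest-neighbour search over sentence embeddings. A hedged sketch of that pattern, assuming ProductRecommender.index is a FAISS flat L2 index over all-MiniLM-L6-v2 embeddings (the real class loads recommendations.csv; the catalog here is made up for illustration):

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')
catalog = ["wireless headset", "noise-cancelling earbuds", "USB desk microphone"]

# Embed the catalog and index it for exact L2 nearest-neighbour search.
embeddings = model.encode(catalog).astype(np.float32)
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)

# Embed a query the same way and keep the single nearest item
# only if it is closer than the 1.5 threshold used in app.py.
query = model.encode(["something for noisy sales calls"]).astype(np.float32)
distances, indices = index.search(query, 1)
if distances[0][0] < 1.5:
    print("Recommend:", catalog[indices[0][0]])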