Zasha1 committed
Commit b4e4003 · verified · 1 Parent(s): 70d9fc1

Update sentiment_analysis.py

Files changed (1)
sentiment_analysis.py  +17 -11
sentiment_analysis.py CHANGED
@@ -5,7 +5,7 @@ from speech_recognition import Recognizer, Microphone, AudioData, UnknownValueError
 from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
 from huggingface_hub import login
 from product_recommender import ProductRecommender
-from objection_handler import load_objections, check_objections # Ensure check_objections is imported
+from objection_handler import load_objections, check_objections
 from objection_handler import ObjectionHandler
 from env_setup import config
 from sentence_transformers import SentenceTransformer
@@ -61,6 +61,10 @@ def analyze_sentiment(text):
 
 def transcribe_with_chunks(objections_dict):
     print("Note: If microphone access fails, please use alternative input.")
+    chunks = []
+    current_chunk = []
+    chunk_start_time = time.time()
+    is_listening = False
 
     try:
         # Try to list available microphones
@@ -69,6 +73,17 @@ def transcribe_with_chunks(objections_dict):
     except Exception as e:
         print(f"Could not detect microphones: {e}")
 
+    # Replace hardcoded path with environment variable or relative path
+    objection_file_path = config.get("OBJECTION_DATA_PATH", "objections.csv")
+    product_file_path = config.get("PRODUCT_DATA_PATH", "recommendations.csv")
+
+    # Initialize handlers with semantic search capabilities
+    objection_handler = ObjectionHandler(objection_file_path)
+    product_recommender = ProductRecommender(product_file_path)
+
+    # Load the embeddings model once
+    model = SentenceTransformer('all-MiniLM-L6-v2')
+
     try:
         # Try multiple device indices
         mic = None
@@ -84,14 +99,6 @@ def transcribe_with_chunks(objections_dict):
         print("No microphone available. Please provide text input.")
         return []
 
-    # Initialize handlers with semantic search capabilities
-    objection_handler = ObjectionHandler(r"C:\Users\shaik\Downloads\Sales Calls Transcriptions - Sheet3.csv")
-    product_recommender = ProductRecommender(r"C:\Users\shaik\Downloads\Sales Calls Transcriptions - Sheet2.csv")
-
-    # Load the embeddings model once
-    model = SentenceTransformer('all-MiniLM-L6-v2')
-
-    try:
     with mic as source:
         recognizer.adjust_for_ambient_noise(source)
         print("Microphone calibrated. Please speak.")
@@ -102,7 +109,6 @@ def transcribe_with_chunks(objections_dict):
                 audio_data = recognizer.listen(source, timeout=5)
                 text = recognizer.recognize_google(audio_data)
 
-
                 if "start listening" in text.lower():
                     is_listening = True
                     print("Listening started. Speak into the microphone.")
@@ -165,7 +171,7 @@ def transcribe_with_chunks(objections_dict):
     return chunks
 
 if __name__ == "__main__":
-    objections_file_path = r"C:\Users\shaik\Downloads\Sales Calls Transcriptions - Sheet3.csv"
+    objections_file_path = config.get("OBJECTION_DATA_PATH", "objections.csv")
    objections_dict = load_objections(objections_file_path)
     transcribed_chunks = transcribe_with_chunks(objections_dict)
     print("Final transcriptions and sentiments:", transcribed_chunks)