Spaces (status: Runtime error)

invincible-jha committed: Upload app.py

app.py CHANGED
@@ -9,6 +9,8 @@ import plotly.graph_objects as go
 import warnings
 import os
 from scipy.stats import kurtosis, skew
+from anthropic import Anthropic
+from typing import Dict, Optional, Tuple
 
 # Suppress unnecessary warnings for cleaner output
 warnings.filterwarnings('ignore')
@@ -18,317 +20,139 @@ processor = None
 whisper_model = None
 emotion_tokenizer = None
 emotion_model = None
+clinical_analyzer = None
 
-def load_models():
-    """
-    ...
-    """
-    try:
-        # Load the Whisper model for speech recognition
-        print("Loading Whisper model...")
-        processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
-        whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
-
-        # Load the emotion detection model
-        print("Loading emotion model...")
-        emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
-        emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
-
-        # Move models to CPU for consistent performance
-        device = "cpu"
-        whisper_model.to(device)
-        emotion_model.to(device)
-
-        print("Models loaded successfully!")
-        return True
-
-    except Exception as e:
-        print(f"Error loading models: {str(e)}")
-        return False
-
-def extract_prosodic_features(waveform, sr):
-    """
-    This function analyzes the audio waveform to extract various voice features
-    including pitch, energy, rhythm, and voice quality metrics. It includes
-    robust error handling and validation for each feature.
-
-    Args:
-        waveform (numpy.ndarray): Audio signal
-        sr (int): Sampling rate of the audio
-        ...
-    """
-    try:
-        # Validate input waveform
-        if waveform is None or len(waveform) == 0:
-            print("Error: Empty or invalid waveform")
-            return None
-
-        features = {}
-
-        # Extract pitch features with enhanced reliability
-        try:
-            # Configure pitch detection parameters for optimal results
-            pitches, magnitudes = librosa.piptrack(
-                y=waveform,
-                sr=sr,
-                fmin=50,          # Minimum frequency for human voice
-                fmax=2000,        # Maximum frequency for human voice
-                n_mels=128,       # Frequency resolution
-                hop_length=512,   # Time resolution
-                win_length=2048   # Analysis window size
-            )
-
-            f0_contour = []
-            for t in range(pitches.shape[1]):
-                index = magnitudes[:, t].argmax()
-                pitch = pitches[index, t]
-                if 50 <= pitch <= 2000:  # Physiologically valid range
-                    f0_contour.append(pitch)
-
-            f0_contour = np.array(f0_contour)
-
-            # Calculate pitch statistics with validation
-            if len(f0_contour) > 0:
-                features['pitch_mean'] = float(np.mean(f0_contour))
-                features['pitch_std'] = float(np.std(f0_contour))
-                features['pitch_range'] = float(np.ptp(f0_contour))
-            else:
-                # Use default values if no valid pitch detected
-                features['pitch_mean'] = 160.0  # Average adult speaking pitch
-                features['pitch_std'] = 0.0
-                features['pitch_range'] = 0.0
-
-        except Exception as e:
-            print(f"Error in pitch extraction: {str(e)}")
-            features['pitch_mean'] = 160.0
-            features['pitch_std'] = 0.0
-            features['pitch_range'] = 0.0
-
-        # Extract energy features with noise reduction
-        try:
-            rms = librosa.feature.rms(
-                y=waveform,
-                frame_length=2048,
-                hop_length=512,
-                center=True
-            )[0]
-
-            features['energy_mean'] = float(np.mean(rms))
-            features['energy_std'] = float(np.std(rms))
-            features['energy_range'] = float(np.ptp(rms))
-
-        except Exception as e:
-            print(f"Error in energy extraction: {str(e)}")
-            features['energy_mean'] = 0.02
-            features['energy_std'] = 0.0
-            features['energy_range'] = 0.0
-
-        # Extract rhythm features with improved accuracy
-        try:
-            onset_env = librosa.onset.onset_strength(
-                y=waveform,
-                sr=sr,
-                hop_length=512
-            )
-
-            tempo = librosa.beat.tempo(
-                onset_envelope=onset_env,
-                sr=sr,
-                hop_length=512,
-                aggregate=None
-            )
-
-            # Validate tempo within normal speech range
-            if 40 <= tempo[0] <= 240:
-                features['tempo'] = float(tempo[0])
-            else:
-                features['tempo'] = 120.0  # Default speaking rate
-
-        except Exception as e:
-            print(f"Error in rhythm extraction: {str(e)}")
-            features['tempo'] = 120.0
-
-        # Verify all required features are present
-        required_features = [
-            'pitch_mean', 'pitch_std', 'pitch_range',
-            'energy_mean', 'energy_std', 'energy_range',
-            'tempo'
-        ]
-
-        for feature in required_features:
-            if feature not in features or not isinstance(features[feature], (int, float)):
-                print(f"Warning: Invalid or missing feature: {feature}")
-                features[feature] = 0.0
-
-        return features
-
-    except Exception as e:
-        print(f"Critical error in extract_prosodic_features: {str(e)}")
-        return None
-
-def create_feature_plots(features):
-    """
-    This function generates interactive plots showing the various voice
-    characteristics including pitch, energy, and rhythm features.
-
-    Args:
-        features (dict): Dictionary containing the extracted voice features
-
-    Returns:
-        str: HTML representation of the plots or None if visualization fails
-    """
-    try:
-        fig = go.Figure()
-
-        # Add pitch feature visualization
-        pitch_data = {
-            'Mean': features['pitch_mean'],
-            'Std Dev': features['pitch_std'],
-            'Range': features['pitch_range']
-        }
-
-        fig.add_trace(go.Bar(
-            name='Pitch Features (Hz)',
-            x=list(pitch_data.keys()),
-            y=list(pitch_data.values()),
-            marker_color='blue'
-        ))
-
-        # Add energy feature visualization
-        energy_data = {
-            'Mean': features['energy_mean'],
-            'Std Dev': features['energy_std'],
-            'Range': features['energy_range']
-        }
-
-        fig.add_trace(go.Bar(
-            name='Energy Features',
-            x=[f"Energy {k}" for k in energy_data.keys()],
-            y=list(energy_data.values()),
-            marker_color='red'
-        ))
-
-        # Add tempo indicator
-        fig.add_trace(go.Scatter(
-            name='Speech Rate (BPM)',
-            x=['Tempo'],
-            y=[features['tempo']],
-            mode='markers',
-            marker=dict(size=15, color='green')
-        ))
-
-        # Configure layout for better visualization
-        fig.update_layout(
-            title='Voice Feature Analysis',
-            showlegend=True,
-            height=600,
-            barmode='group',
-            xaxis_title='Feature Type',
-            yaxis_title='Value',
-            template='plotly_white'
-        )
-
-        return fig.to_html(include_plotlyjs=True)
-
-    except Exception as e:
-        ...
-
-def create_emotion_plot(emotion_scores):
-    ...
+class ClinicalVoiceAnalyzer:
+    """Clinical voice analysis system using Anthropic's Claude for interpretation."""
+
+    def __init__(self):
+        """Initialize the clinical analyzer with reference ranges and API client."""
+        self.anthropic = Anthropic(api_key=os.getenv('ANTHROPIC_API_KEY'))
+        self.model = "claude-3-opus-20240229"
+        self.reference_ranges = {
+            'pitch': {'min': 150, 'max': 400},
+            'tempo': {'min': 90, 'max': 130},
+            'energy': {'min': 0.01, 'max': 0.05}
+        }
+        print("Clinical analyzer initialized successfully")
+
+    def analyze_voice_metrics(self, features: Dict, emotions: Dict, transcription: str) -> str:
+        """Perform comprehensive clinical analysis of voice characteristics.
+
+        Args:
+            features: Dictionary of extracted voice features
+            emotions: Dictionary of emotion scores
+            transcription: Text transcription of the speech
+
+        Returns:
+            str: Detailed clinical analysis
+        """
+        try:
+            prompt = self._create_analysis_prompt(features, emotions, transcription)
+            response = self.anthropic.messages.create(
+                model=self.model,
+                max_tokens=1000,
+                messages=[{
+                    "role": "user",
+                    "content": prompt
+                }]
+            )
+            return self._format_clinical_response(response.content)
+        except Exception as e:
+            print(f"Error in clinical analysis: {str(e)}")
+            return self._generate_fallback_analysis(features, emotions)
+
+    def _create_analysis_prompt(self, features: Dict, emotions: Dict, transcription: str) -> str:
+        """Create a detailed prompt for clinical analysis.
+
+        Constructs a comprehensive prompt that includes all relevant voice metrics,
+        emotional patterns, and speech content for analysis.
+        """
+        return f"""As a clinical voice analysis expert specializing in mental health assessment,
+provide a detailed psychological evaluation based on the following data:
+
+Voice Characteristics:
+- Pitch: {features['pitch_mean']:.2f} Hz (Normal range: {self.reference_ranges['pitch']['min']}-{self.reference_ranges['pitch']['max']} Hz)
+- Pitch Variation: {features['pitch_std']:.2f} Hz
+- Speech Rate: {features['tempo']:.2f} BPM (Normal range: {self.reference_ranges['tempo']['min']}-{self.reference_ranges['tempo']['max']} BPM)
+- Voice Energy: {features['energy_mean']:.4f} (Normal range: {self.reference_ranges['energy']['min']}-{self.reference_ranges['energy']['max']})
+
+Emotional Analysis:
+{', '.join(f'{emotion}: {score:.1%}' for emotion, score in emotions.items())}
+
+Speech Content:
+"{transcription}"
+
+Please provide a comprehensive clinical assessment including:
+1. Analysis of voice characteristics and their psychological significance
+2. Emotional state evaluation and potential underlying patterns
+3. Assessment of anxiety and depression indicators
+4. Stress level evaluation
+5. Clinical recommendations and considerations
+
+Present the analysis in clear sections with specific observations and clinical insights."""
+
+    def _format_clinical_response(self, analysis: str) -> str:
+        """Format the clinical analysis for clear presentation."""
+        return f"""
+Clinical Analysis:
+{analysis}
+"""
+
+    def _generate_fallback_analysis(self, features: Dict, emotions: Dict) -> str:
+        """Generate basic analysis when API is unavailable."""
+        dominant_emotion = max(emotions.items(), key=lambda x: x[1])[0]
+        pitch_status = "elevated" if features['pitch_mean'] > self.reference_ranges['pitch']['max'] else \
+                       "reduced" if features['pitch_mean'] < self.reference_ranges['pitch']['min'] else "normal"
+
+        return f"""
+Basic Clinical Analysis (API Unavailable):
+
+Voice Pattern Analysis:
+- Pitch is {pitch_status} ({features['pitch_mean']:.2f} Hz)
+- Speech rate shows {features['tempo']:.2f} BPM
+- Voice energy indicates {features['energy_mean']:.4f} level
+
+Emotional Indication:
+- Primary emotional tone: {dominant_emotion}
+- Confidence: {max(emotions.values()):.1%}
+
+Note: This is a basic analysis. For detailed clinical interpretation, please ensure API connectivity.
+"""
+
+# Your existing functions (load_models, extract_prosodic_features, etc.) remain the same...
 
 def analyze_audio(audio_input):
-    """
-    This function coordinates the entire analysis pipeline including:
-    - Audio loading and validation
-    - Feature extraction
-    - Speech recognition
-    - Emotion analysis
-    - Visualization generation
-
-    Args:
-        audio_input: Audio file path or tuple containing audio data
-
-    Returns:
-        tuple: (analysis_summary, emotion_visualization, feature_visualization)
-    """
+    """Enhanced main function with clinical analysis integration."""
     try:
         if audio_input is None:
             return "Please provide an audio input", None, None
 
-        #
+        # Existing audio processing code...
         if isinstance(audio_input, tuple):
             audio_path = audio_input[0]
         else:
             audio_path = audio_input
 
-        # Load and validate audio
         waveform, sr = librosa.load(audio_path, sr=16000, duration=30)
         duration = len(waveform) / sr
 
         if duration < 0.5:
             return "Audio too short. Please provide a recording of at least 0.5 seconds.", None, None
 
-        # Extract voice features
         features = extract_prosodic_features(waveform, sr)
         if features is None:
             return "Error extracting voice features. Please try recording again.", None, None
 
-        # Create visualizations
         feature_viz = create_feature_plots(features)
 
-        #
+        # Speech recognition
        inputs = processor(waveform, sampling_rate=sr, return_tensors="pt").input_features
         with torch.no_grad():
             predicted_ids = whisper_model.generate(inputs)
         transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
 
-        #
+        # Emotion analysis
         emotion_inputs = emotion_tokenizer(
             transcription,
             return_tensors="pt",
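The new class can be exercised on its own, outside the Gradio app. The sketch below is not part of the commit: the feature values, emotion scores, and the placeholder key are invented for illustration, and the feature keys mirror what extract_prosodic_features produces. Two caveats: the Anthropic client is created in __init__, so a missing ANTHROPIC_API_KEY can fail before the fallback path is ever reached, and with current anthropic SDKs response.content is a list of content blocks (response.content[0].text holds the text), so _format_clinical_response may interpolate a list rather than a string.

import os

os.environ.setdefault("ANTHROPIC_API_KEY", "sk-ant-...")  # placeholder, not a real key

analyzer = ClinicalVoiceAnalyzer()

# Keys match extract_prosodic_features; the values are made up.
features = {
    "pitch_mean": 210.5, "pitch_std": 32.1, "pitch_range": 180.0,
    "energy_mean": 0.023, "energy_std": 0.008, "energy_range": 0.040,
    "tempo": 112.0,
}
# Illustrative scores for the emotion model's labels.
emotions = {"joy": 0.61, "neutral": 0.22, "sadness": 0.08,
            "anger": 0.05, "fear": 0.03, "surprise": 0.01}

# With a placeholder key the API call fails, so this prints the
# _generate_fallback_analysis text, a convenient test of the fallback path.
print(analyzer.analyze_voice_metrics(features, emotions, "I feel pretty good today."))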
@@ -349,7 +173,16 @@ def analyze_audio(audio_input):
 
         emotion_viz = create_emotion_plot(emotion_scores)
 
-        # Generate summary
+        # Generate clinical analysis
+        global clinical_analyzer
+        if clinical_analyzer is None:
+            clinical_analyzer = ClinicalVoiceAnalyzer()
+
+        clinical_analysis = clinical_analyzer.analyze_voice_metrics(
+            features, emotion_scores, transcription
+        )
+
+        # Create enhanced summary with clinical insights
         summary = f"""Voice Analysis Summary:
 
 Speech Content:
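The hunk above creates the analyzer lazily inside analyze_audio through a global statement, while the startup code further down also constructs one eagerly. The two sites could be consolidated into a single accessor; a minimal sketch, with the hypothetical helper name get_clinical_analyzer:

clinical_analyzer = None

def get_clinical_analyzer():
    """Return the shared ClinicalVoiceAnalyzer, creating it on first use."""
    global clinical_analyzer
    if clinical_analyzer is None:
        clinical_analyzer = ClinicalVoiceAnalyzer()
    return clinical_analyzer

# analyze_audio would then reduce its lazy-init block to:
# clinical_analysis = get_clinical_analyzer().analyze_voice_metrics(
#     features, emotion_scores, transcription)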
@@ -365,6 +198,8 @@ Dominant Emotion: {max(emotion_scores.items(), key=lambda x: x[1])[0]}
 Emotion Confidence: {max(emotion_scores.values()):.2%}
 
 Recording Duration: {duration:.2f} seconds
+
+{clinical_analysis}
 """
 
         return summary, emotion_viz, feature_viz
@@ -374,13 +209,17 @@ Recording Duration: {duration:.2f} seconds
         print(error_msg)
         return error_msg, None, None
 
-# Initialize the application
+# Initialize the application with clinical analysis capability
 try:
     print("===== Application Startup =====")
     if not load_models():
         raise RuntimeError("Failed to load required models")
 
-    # Create Gradio interface
+    # Initialize clinical analyzer
+    clinical_analyzer = ClinicalVoiceAnalyzer()
+    print("Clinical analyzer initialized")
+
+    # Create Gradio interface with enhanced description
     demo = gr.Interface(
         fn=analyze_audio,
         inputs=gr.Audio(
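Because ClinicalVoiceAnalyzer.__init__ builds an Anthropic client from the ANTHROPIC_API_KEY environment variable, the eager construction above may raise on a Space where that secret is not configured, aborting startup. A defensive variant (an assumption, not part of the commit) would fall back to None and let analyze_audio's lazy path retry later:

# Hypothetical defensive startup initialization.
try:
    clinical_analyzer = ClinicalVoiceAnalyzer()
    print("Clinical analyzer initialized")
except Exception as e:
    print(f"Clinical analyzer unavailable at startup: {e}")
    clinical_analyzer = None  # analyze_audio() retries lazily on first request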
@@ -389,13 +228,13 @@ try:
             label="Audio Input (Recommended: 1-5 seconds of clear speech)"
         ),
         outputs=[
-            gr.Textbox(label="Analysis Summary", lines=
+            gr.Textbox(label="Analysis Summary", lines=15),
             gr.HTML(label="Emotion Analysis"),
             gr.HTML(label="Voice Feature Analysis")
         ],
-        title="Voice Analysis System",
+        title="Advanced Voice Analysis System with Clinical Interpretation",
         description="""
-        This application provides
+        This application provides comprehensive voice analysis with clinical interpretation:
 
         1. Voice Features:
         - Pitch analysis (fundamental frequency and variation)
@@ -403,13 +242,16 @@ try:
         - Speech rate (words per minute)
         - Voice quality metrics
 
-        2. Emotional Content:
+        2. Clinical Analysis:
+        - Mental health indicators
+        - Emotional state evaluation
+        - Risk assessment
+        - Clinical recommendations
+
+        3. Emotional Content:
         - Emotion detection (6 basic emotions)
         - Emotional intensity analysis
 
-        3. Speech Content:
-        - Accurate text transcription
-
         For optimal results:
         - Record in a quiet environment
         - Speak clearly and naturally
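A final observation on this commit: the large hunk above deletes the definitions of load_models, extract_prosodic_features, create_feature_plots, and create_emotion_plot, adding only the comment "# Your existing functions (load_models, extract_prosodic_features, etc.) remain the same..." in their place, while the surviving code still calls all four. If the uploaded file matches this diff, the app fails with a NameError at startup, which would be consistent with the Space's Runtime error status. A small, hypothetical guard that surfaces the problem before the interface is built:

# Hypothetical startup check, not part of the commit.
REQUIRED_FUNCTIONS = (
    "load_models",
    "extract_prosodic_features",
    "create_feature_plots",
    "create_emotion_plot",
)

missing = [name for name in REQUIRED_FUNCTIONS if name not in globals()]
if missing:
    raise RuntimeError(f"app.py is missing required functions: {', '.join(missing)}")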