Mattral committed on
Commit a439a61 · verified · 1 Parent(s): 57a3fb7

Update app.py

Files changed (1)
  1. app.py +140 -56
app.py CHANGED
@@ -9,6 +9,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 import matplotlib.pyplot as plt
 import plotly.express as px
+import seaborn as sns
 
 # Dummy CNN Model
 class SimpleCNN(nn.Module):
@@ -219,95 +220,178 @@ if uploaded_file is not None:
     st.subheader("CNN Processing Visualization")
     activations, magnitude_tensor = pass_to_cnn(st.session_state.filtered_fft[0])
 
-    # Display input tensor
-    st.write("### Input Magnitude Tensor:")
-    st.image(magnitude_tensor.squeeze().numpy(),
-             caption="Magnitude Tensor",
-             use_column_width=True,
-             clamp=True)
+    # Display input tensor with improved visualization
+    st.write("### Input Magnitude Tensor")
+    fig_input, ax_input = plt.subplots(figsize=(8, 8))
+    input_img = magnitude_tensor.squeeze().numpy()
+    im = ax_input.imshow(input_img, cmap='viridis')
+    plt.colorbar(im, ax=ax_input)
+    st.pyplot(fig_input)
 
-    # Display activations with improved visualization
+    # Display activation maps with proper normalization
     st.write("### First Convolution Layer Activations")
     activation = activations.detach().numpy()
 
     if len(activation.shape) == 4:
-        # Create a grid of activation maps
-        cols = 4 # Number of columns in the grid
-        rows = 4 # 16 channels / 4 columns = 4 rows
+        # Create grid layout for activation maps
+        st.write("#### Activation Maps Visualization")
+        cols = 4
+        rows = 4
         fig, axs = plt.subplots(rows, cols, figsize=(20, 20))
 
         for i in range(activation.shape[1]):
-            act_img = activation[0, i, :, :]
             ax = axs[i//cols, i%cols]
-            ax.imshow(act_img, cmap='viridis')
+            act_img = activation[0, i, :, :]
+            vmin, vmax = np.percentile(act_img, [1, 99]) # Robust normalization
+            im = ax.imshow(act_img, cmap='inferno', vmin=vmin, vmax=vmax)
             ax.set_title(f'Channel {i+1}')
-            ax.axis('off')
+            fig.colorbar(im, ax=ax)
 
+        plt.tight_layout()
         st.pyplot(fig)
 
-        # Display sample activation values
-        st.write("### Activation Values Sample")
-        sample_activation = activation[0, 0, :10, :10] # First 10x10 values
-        st.dataframe(pd.DataFrame(sample_activation))
-
-    # Additional Steps After Activation Channels
+        # Display activation statistics
+        st.write("#### Activation Value Distribution")
+        flat_activations = activation.flatten()
+        fig_hist = px.histogram(
+            x=flat_activations,
+            nbins=100,
+            title="Activation Value Distribution",
+            labels={'x': 'Activation Value'}
+        )
+        st.plotly_chart(fig_hist)
+
+    # Second Convolution Layer Visualization
     st.markdown("---")
-    st.subheader("Next Processing Steps in CNN")
-
-    # Step 2: Second Convolution Layer Visualization
-    st.write("### Second Convolution Layer Features")
+    st.subheader("Second Convolution Layer Features")
     with torch.no_grad():
         model = SimpleCNN()
-        output, activations = model(magnitude_tensor)
-        second_conv = model.conv2(activations).detach().numpy()
+        _, first_conv = model(magnitude_tensor)
+        second_conv = model.conv2(first_conv).detach().numpy()
 
     if len(second_conv.shape) == 4:
-        cols = 8 # 32 channels / 8 columns = 4 rows
+        # Display sample feature maps
+        st.write("#### Feature Maps Visualization")
+        cols = 8
         rows = 4
         fig2, axs2 = plt.subplots(rows, cols, figsize=(20, 10))
 
-        for i in range(second_conv.shape[1]):
-            act_img = second_conv[0, i, :, :]
+        for i in range(32): # For all 32 channels
             ax = axs2[i//cols, i%cols]
-            ax.imshow(act_img, cmap='plasma')
-            ax.set_title(f'Channel {i+1}')
+            feature_map = second_conv[0, i, :, :]
+            vmin, vmax = np.percentile(feature_map, [1, 99])
+            im = ax.imshow(feature_map, cmap='plasma', vmin=vmin, vmax=vmax)
+            ax.set_title(f'FM {i+1}')
             ax.axis('off')
 
+        plt.tight_layout()
         st.pyplot(fig2)
-
-    # Step 3: Pooling Layer Visualization
-    st.write("### Adaptive Average Pooling Output")
+
+    # Pooling Layer Visualization
+    st.markdown("---")
+    st.subheader("Pooling Layer Output")
     with torch.no_grad():
         pooled = F.adaptive_avg_pool2d(torch.tensor(second_conv), (8, 8)).numpy()
 
-    st.write("Pooled Features Shape:", pooled.shape)
+    st.write("#### Pooled Features Dimensionality Reduction")
+
+    # Create a heatmap using seaborn
+    fig_pool, ax_pool = plt.subplots(figsize=(10, 6))
+    sns.heatmap(
+        pooled[0, 0], # Use the first channel of the pooled features
+        annot=True, # Show values in each cell
+        fmt=".2f", # Format values to 2 decimal places
+        cmap="coolwarm", # Use a color map for better visualization
+        ax=ax_pool # Plot on the created axis
+    )
+    st.pyplot(fig_pool)
 
-    # Normalize and display pooled features
-    pooled_sample = pooled[0, 0]
-    pooled_normalized = (pooled_sample - pooled_sample.min()) / (pooled_sample.max() - pooled_sample.min())
-    st.image(pooled_normalized,
-             caption="Sample Pooled Feature Map",
-             use_container_width=True,
-             clamp=True)
+    # Create a grid of pooled feature maps
+    cols = 4
+    rows = 2
+    fig, axs = plt.subplots(rows, cols, figsize=(20, 10))
+
+    for i in range(rows * cols):
+        ax = axs[i // cols, i % cols]
+        sns.heatmap(
+            pooled[0, i],
+            annot=True,
+            fmt=".2f",
+            cmap="coolwarm",
+            ax=ax
+        )
+        ax.set_title(f"Channel {i+1}")
+
+    plt.tight_layout()
+    st.pyplot(fig)
+
+    # Fully Connected Layer Visualization
+    st.markdown("---")
+    st.subheader("Fully Connected Layer Analysis")
+    with torch.no_grad():
+        model = SimpleCNN()
+        flattened = model.conv2(model.conv1(magnitude_tensor))
+        flattened = F.adaptive_avg_pool2d(flattened, (8, 8))
+        flattened = flattened.view(flattened.size(0), -1)
+        fc_output = model.fc1(flattened).detach().numpy()
 
-    # Step 4: Final Classification
-    st.write("### Final Classification Scores")
+    st.write("#### FC Layer Activation Patterns")
+    fig_fc = px.imshow(
+        fc_output.T,
+        labels=dict(x="Neurons", y="Features", color="Activation"),
+        color_continuous_scale="viridis"
+    )
+    st.plotly_chart(fig_fc)
+
+    # Final Classification Visualization
+    st.markdown("---")
+    st.subheader("Final Classification Results")
     with torch.no_grad():
         model = SimpleCNN()
        output, _ = model(magnitude_tensor)
-        scores = F.softmax(output, dim=1).numpy()
-
+        probabilities = F.softmax(output, dim=1).numpy()[0]
+
     classes = [f"Class {i}" for i in range(10)]
-    fig3 = px.bar(x=classes, y=scores[0], title="Classification Probabilities")
-    st.plotly_chart(fig3)
+    df = pd.DataFrame({"Class": classes, "Probability": probabilities})
 
-    # Step 5: Full Process Explanation
+    fig_class = px.bar(
+        df,
+        x="Class",
+        y="Probability",
+        color="Probability",
+        color_continuous_scale="tealrose"
+    )
+    st.plotly_chart(fig_class)
+
+    # Full Pipeline Explanation
     st.markdown("""
-#### Processing Pipeline:
-1. Input Magnitude Spectrum →
-2. Conv1 Features (16 channels) →
-3. Conv2 Features (32 channels) →
-4. Pooled Features →
-5. Fully Connected Layers
-6. Final Classification
-""")
+### Complete Processing Pipeline
+<div style="
+    background-color: #f0f2f6;
+    padding: 30px;
+    border-radius: 15px;
+    box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
+    font-family: 'Arial', sans-serif;
+    font-size: 16px;
+    color: #333;
+    border: 1px solid #dcdcdc;
+">
+<ul style="list-style-type: none; padding-left: 0;">
+    <li><strong>1. Input Preparation:</strong> Magnitude spectrum from FFT</li>
+    <li><strong>2. Feature Extraction:</strong>
+        <ul>
+            <li>- Conv1: 16 filters (3x3)</li>
+            <li>- Conv2: 32 filters (3x3)</li>
+        </ul>
+    </li>
+    <li><strong>3. Dimensionality Reduction:</strong> Adaptive average pooling (8x8)</li>
+    <li><strong>4. Feature Transformation:</strong>
+        <ul>
+            <li>- Flattening: 32×8×8 → 2048 features</li>
+            <li>- FC1: 2048 → 128 dimensions</li>
+        </ul>
+    </li>
+    <li><strong>5. Classification:</strong> FC2: 128 → 10 classes</li>
</ul>
+</div>
+""", unsafe_allow_html=True)