pawlo2013 committed
Commit 6bae70d · 1 Parent(s): 0eadccf

let us fix it

Files changed (1)
  1. app.py +11 -8
app.py CHANGED
@@ -34,8 +34,10 @@ def classify_and_visualize(img, device="cpu", discard_ratio=0.9, head_fusion="me
     img = img.convert("RGB")
     processed_input = processor(images=img, return_tensors="pt").to(device)
 
+    processed_input = processed_input["pixel_values"].to(device)
+
     with torch.no_grad():
-        outputs = model(**processed_input)
+        outputs = model(processed_input, output_attentions=True)
         logits = outputs.logits
         probabilities = torch.softmax(logits, dim=1)[0].tolist()
         prediction = torch.argmax(logits, dim=-1).item()
@@ -45,7 +47,7 @@ def classify_and_visualize(img, device="cpu", discard_ratio=0.9, head_fusion="me
 
     # Generate attention heatmap
     heatmap_img = show_final_layer_attention_maps(
-        model, processed_input, device, discard_ratio, head_fusion
+        outputs, processed_input, device, discard_ratio, head_fusion
     )
 
     return {"probabilities": result, "heatmap": heatmap_img}
@@ -66,16 +68,17 @@ def load_examples_from_folder(folder_path):
 
 # Function to show final layer attention maps
 def show_final_layer_attention_maps(
-    model, tensor, device, discard_ratio=0.6, head_fusion="max", only_last_layer=False
+    outputs,
+    processed_input,
+    device,
+    discard_ratio=0.6,
+    head_fusion="max",
+    only_last_layer=False,
 ):
-    image = tensor["pixel_values"].to(device).squeeze(0)
 
     with torch.no_grad():
-        outputs = model(**tensor, output_attentions=True)
 
-        # if outputs.attentions[0] is None:
-        #     print("Attention outputs are None.")
-        #     return None
+        image = processed_input.squeeze(0)
 
     image = image - image.min()
     image = image / image.max()
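
For context, a minimal sketch of the call flow this commit moves to: classify_and_visualize now runs the model once with output_attentions=True and hands the resulting outputs to show_final_layer_attention_maps, instead of having that helper re-run the model on the tensor. The checkpoint name "google/vit-base-patch16-224" and the transformers ViT classes below are placeholder assumptions for illustration, not necessarily what this Space actually loads.

import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

# Placeholder checkpoint; the Space may load a different fine-tuned model.
checkpoint = "google/vit-base-patch16-224"
processor = ViTImageProcessor.from_pretrained(checkpoint)
model = ViTForImageClassification.from_pretrained(checkpoint)
model.eval()

img = Image.open("example.jpg").convert("RGB")
pixel_values = processor(images=img, return_tensors="pt")["pixel_values"]

with torch.no_grad():
    # One forward pass returns both logits and per-layer attentions,
    # mirroring the single model call introduced by this commit.
    outputs = model(pixel_values, output_attentions=True)

probabilities = torch.softmax(outputs.logits, dim=1)[0].tolist()
prediction = torch.argmax(outputs.logits, dim=-1).item()
# outputs (including outputs.attentions) can now be passed to the heatmap
# helper instead of the model itself.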