taesiri committed · Commit 8cc7679 · Parent(s): 24afd8b

Update app.py

Files changed (1):
  app.py  +113 -12
app.py CHANGED

@@ -2,11 +2,69 @@ import gradio as gr
 import json
 import numpy as np
 import datasets
+import cv2
+import matplotlib.pyplot as plt
 
-bugs_ds = datasets.load_dataset("asgaardlab/SampleDataset", split="validation")
+sample_dataset1 = datasets.load_dataset("asgaardlab/SampleDataset", split="validation")
+sample_dataset2 = datasets.load_dataset("asgaardlab/SampleDataset2", split="validation")
 
 
-def generate_annotations(image_index):
+
+
+def overlay_with_transparency(background, overlay, alpha_mask):
+    """
+    Overlay a semi-transparent image on top of another image.
+
+    Args:
+    - background: The image on which the overlay will be added.
+    - overlay: The image to overlay.
+    - alpha_mask: The blending weight applied to the overlay.
+    """
+    return cv2.addWeighted(background, 1, overlay, alpha_mask, 0)
+
+def generate_overlay_image(buggy_image, objects, segmentation_image_rgb, font_scale=0.5, font_color=(0, 255, 255)):
+    """
+    Generate an overlaid image using the provided annotations.
+
+    Args:
+    - buggy_image: The image to be overlaid.
+    - objects: The JSON object details.
+    - segmentation_image_rgb: The segmentation image.
+    - font_scale: Scale factor for the font size.
+    - font_color: Color for the font in BGR format.
+
+    Returns:
+    - The overlaid image.
+    """
+    overlaid_img = buggy_image.copy()
+
+    for obj in objects:
+        # Get the mask for this object
+        color = tuple(obj["color"])[:-1]
+        mask = np.all(segmentation_image_rgb[:, :, :3] == np.array(color), axis=-1).astype(np.float32)
+
+        # Create a colored version of the mask using the object's color
+        colored_mask = np.zeros_like(overlaid_img)
+        colored_mask[mask == 1] = color
+
+        # Overlay the colored mask onto the original image with 0.3 transparency
+        overlaid_img = overlay_with_transparency(overlaid_img, colored_mask, 0.3)
+
+        # Find the center of the mask to place the label
+        mask_coords = np.argwhere(mask)
+        y_center, x_center = np.mean(mask_coords, axis=0).astype(int)
+
+        # Draw the object's name at the center with specified font size and color
+        cv2.putText(overlaid_img, obj["labelName"], (x_center, y_center),
+                    cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_color, 1, cv2.LINE_AA)
+
+    return overlaid_img
+
+
+
+def generate_annotations(selected_dataset, image_index):
+    bugs_ds = sample_dataset1 if selected_dataset == 'Western Scene' else sample_dataset2
+
     image_index = int(image_index)
     objects_json = bugs_ds[image_index]["Objects JSON (Correct)"]
     objects = json.loads(objects_json)
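
A note on the blending helper added in this hunk: cv2.addWeighted computes a plain weighted sum, dst = alpha*src1 + beta*src2 + gamma, saturated to the image dtype. With the weights used here, overlay_with_transparency(background, overlay, 0.3) returns background + 0.3*overlay, so masked regions are brightened on top of the full-strength background rather than alpha-composited in the usual background*(1 - a) + overlay*a sense. A minimal sketch of the difference, using toy arrays rather than dataset images:

```python
import numpy as np
import cv2

# Toy 1x1 RGB "images": a gray background and a bright overlay.
background = np.full((1, 1, 3), 100, dtype=np.uint8)
overlay = np.full((1, 1, 3), 250, dtype=np.uint8)

# What overlay_with_transparency(background, overlay, 0.3) computes:
# 1.0 * background + 0.3 * overlay, saturated to uint8.
blended = cv2.addWeighted(background, 1, overlay, 0.3, 0)
print(blended.ravel())    # [175 175 175] = 100 + 0.3 * 250

# A conventional alpha composite weights both terms instead:
composite = cv2.addWeighted(background, 0.7, overlay, 0.3, 0)
print(composite.ravel())  # [145 145 145] = 0.7 * 100 + 0.3 * 250
```

Either rendering is reasonable for a debug overlay; the additive form has the nice property that pixels outside every mask are left untouched, which a uniform alpha composite would not guarantee.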
 
@@ -25,40 +83,83 @@ def generate_annotations(image_index):
     victim_name = bugs_ds[image_index]["Victim Name"]
     bug_type = bugs_ds[image_index]["Tag"]
 
+    bug_image = bugs_ds[image_index]["Buggy Image"]
+    correct_image = bugs_ds[image_index]["Correct Image"]
+
+
+
+
+
+    # # Load a single image sample from the first dataset for demonstration
+    # image_sample = sample_dataset1[0]
+
+    # # Extract annotations for this image sample
+    # objects_json = image_sample["Objects JSON (Correct)"]
+    # objects = json.loads(objects_json)
+    # segmentation_image_rgb = np.array(image_sample["Segmentation Image (Correct)"])
+
+    # # Generate the overlaid image with custom font size and color
+    # overlaid_image = generate_overlay_image(np.array(image_sample["Buggy Image"]), objects, segmentation_image_rgb, font_scale=0.7, font_color=(255, 0, 0))
+
+    # # Display the overlaid image
+    # plt.imshow(overlaid_image)
+    # plt.axis('off')
+    # plt.show()
+
+    overlaid_image = generate_overlay_image(np.array(bugs_ds[image_index]["Buggy Image"]), objects, segmentation_image_rgb)
+
+
     return (
+        bug_image,
+        correct_image,
         (bugs_ds[image_index]["Correct Image"], annotations),
+        overlaid_image,
         objects,
         object_count,
         victim_name,
         bug_type,
     )
 
+def update_slider(selected_dataset):
+    dataset = sample_dataset1 if selected_dataset == 'Western Scene' else sample_dataset2
+    return gr.update(minimum=0, maximum=len(dataset) - 1, step=1)
 
 # Setting up the Gradio interface using blocks API
 with gr.Blocks() as demo:
     gr.Markdown(
         "Enter the image index and click **Submit** to view the segmentation annotations."
     )
+
     with gr.Row():
-        inp = gr.Slider(
-            minimum=0, maximum=len(bugs_ds) - 1, step=1, label="Image Index"
+        selected_dataset = gr.Dropdown(['Western Scene', 'Viking Village'], label="Dataset")
+        input_slider = gr.Slider(
+            minimum=0, maximum=1, step=1, label="Image Index"
         )
-        btn = gr.Button("Submit")
+        btn = gr.Button("Visualize")
+    with gr.Row():
+        bug_image = gr.Image()
+        correct_image = gr.Image()
     with gr.Row():
-        with gr.Column():
-            object_count = gr.Number(label="Object Count")
-            victim_name = gr.Textbox(label="Victim Name")
-            bug_type = gr.Textbox(label="Bug Type")
-
         seg_img = gr.AnnotatedImage()
+        overlaid_img = gr.Image()
+    with gr.Row():
+        object_count = gr.Number(label="Object Count")
+        victim_name = gr.Textbox(label="Victim Name")
+        bug_type = gr.Textbox(label="Bug Type")
 
     with gr.Row():
         json_data = gr.JSON()
 
     btn.click(
         fn=generate_annotations,
-        inputs=inp,
-        outputs=[seg_img, json_data, object_count, victim_name, bug_type],
+        inputs=[selected_dataset, input_slider],
+        outputs=[bug_image, correct_image, seg_img, overlaid_img, json_data, object_count, victim_name, bug_type],
+    )
+
+    selected_dataset.change(
+        fn=update_slider,
+        inputs=[selected_dataset],
+        outputs=[input_slider]
     )
 
 demo.launch()
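
The dropdown-to-slider wiring added here follows the usual Blocks pattern: the change handler returns gr.update(...), which patches the existing slider's properties in place instead of recreating the component. A stripped-down sketch of just that pattern, with made-up dataset sizes standing in for the two loaded datasets:

```python
import gradio as gr

# Hypothetical sizes standing in for len(sample_dataset1) and len(sample_dataset2).
DATASET_SIZES = {"Western Scene": 50, "Viking Village": 30}

def update_slider(selected_dataset):
    # Returning gr.update(...) re-ranges the slider without rebuilding it.
    size = DATASET_SIZES[selected_dataset]
    return gr.update(minimum=0, maximum=size - 1, step=1)

with gr.Blocks() as demo:
    selected = gr.Dropdown(list(DATASET_SIZES), label="Dataset")
    index = gr.Slider(minimum=0, maximum=1, step=1, label="Image Index")
    selected.change(fn=update_slider, inputs=[selected], outputs=[index])

demo.launch()
```

Two review nits in the committed version: the slider starts at maximum=1 and is only re-ranged once the dropdown fires a change event, and the intro Markdown still tells the user to click **Submit** even though the button is now labeled "Visualize".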