Gil-Simas committed
Commit f4caf43 · 1 Parent(s): 37b7c0b

realization of metrics

Files changed (1)
  1. user-friendly-metrics.py +33 -16
user-friendly-metrics.py CHANGED
@@ -190,10 +190,19 @@ def calculate_from_payload(payload: dict,
     print("models: ", models)
     print("sequence_list: ", sequence_list)
 
-    output = {}
+    metrics_per_sequence = {}
+    metrics_global = {}
+    for model in models:
+        metrics_global[model] = {}
+        metrics_global[model]["all"] = {}
+        for filter, filter_ranges in filters.items():
+            metrics_global[model][filter] = {}
+            for filter_range in filter_ranges:
+                filter_range_name = filter_range[0]
+                metrics_global[model][filter][filter_range_name] = {}
 
     for sequence in sequence_list:
-        output[sequence] = {}
+        metrics_per_sequence[sequence] = {}
         frames = payload['sequences'][sequence][gt_field_name]
 
         all_formated_references = {"all": []}
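
The initialisation block added here pre-builds an empty accumulator per model, per filter, and per filter range, so the later sum_dicts calls have something to merge into on the first sequence. Assuming a single hypothetical filter "area" with ranges "small" and "large" (the real names come from the filters argument), the empty skeleton looks like this sketch:

metrics_global = {
    "my_model": {         # one entry per name in models (hypothetical name)
        "all": {},        # accumulator over all ground-truth boxes
        "area": {         # hypothetical filter name
            "small": {},  # one accumulator per filter range
            "large": {},
        },
    },
}
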
@@ -216,7 +225,6 @@
             if filter_value >= filter_range_limits[0] and filter_value <= filter_range_limits[1]:
                 all_formated_references[filter][filter_range_name].append([frame_id+1, index, x, y, w, h])
 
-
         for model in models:
             frames = payload['sequences'][sequence][model]
             formated_predictions = []
@@ -233,24 +241,33 @@ def calculate_from_payload(payload: dict,
            print("formated_predictions: ", formated_predictions)
            print("formated_references: ", all_formated_references)
            if len(formated_predictions) == 0:
-                output[sequence][model] = "Model had no predictions."
+                metrics_per_sequence[sequence][model] = "Model had no predictions."
            elif len(all_formated_references["all"]) == 0:
-                output[sequence][model] = "No ground truth."
+                metrics_per_sequence[sequence][model] = "No ground truth."
            else:
-                output[sequence][model] = {}
-                output[sequence][model]["all"] = calculate(formated_predictions, all_formated_references["all"], max_iou=max_iou, recognition_thresholds = recognition_thresholds)
+                metrics_per_sequence[sequence][model] = {}
+
+                sequence_metrics = calculate(formated_predictions, all_formated_references["all"], max_iou=max_iou, recognition_thresholds = recognition_thresholds)
+                sequence_metrics = realize_metrics(sequence_metrics, recognition_thresholds)
+                metrics_per_sequence[sequence][model]["all"] = sequence_metrics
+
+                metrics_global[model]["all"] = sum_dicts(metrics_global[model]["all"], sequence_metrics)
+                metrics_global[model]["all"] = realize_metrics(metrics_global[model]["all"], recognition_thresholds)
+
                for filter, filter_ranges in filters.items():
-                    metrics_per_sequence[sequence][model][filter] = {}
+                    metrics_per_sequence[sequence][model][filter] = {}
                    for filter_range in filter_ranges:
+
                        filter_range_name = filter_range[0]
-                        output[sequence][model][filter][filter_range_name] = calculate(formated_predictions, all_formated_references[filter][filter_range_name], max_iou=max_iou, recognition_thresholds = recognition_thresholds)
+                        sequence_metrics = calculate(formated_predictions, all_formated_references[filter][filter_range_name], max_iou=max_iou, recognition_thresholds = recognition_thresholds)
+                        sequence_metrics = realize_metrics(sequence_metrics, recognition_thresholds)
+                        metrics_per_sequence[sequence][model][filter][filter_range_name] = sequence_metrics
 
-    global_and_per_sequence_output = {}
-    global_and_per_sequence_output["global"] = per_sequence_to_global(output)
-    global_and_per_sequence_output["per_sequence"] = output
+                        metrics_global[model][filter][filter_range_name] = sum_dicts(metrics_global[model][filter][filter_range_name], sequence_metrics)
+                        metrics_global[model][filter][filter_range_name] = realize_metrics(metrics_global[model][filter][filter_range_name], recognition_thresholds)
 
-    return global_and_per_sequence_output
-
+    return metrics_per_sequence
+
 def sum_dicts(dict1, dict2):
     """
     Recursively sums the numerical values in two nested dictionaries.
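
The accumulation in this hunk feeds each realized sequence_metrics dict into sum_dicts, whose body is outside the diff; only its docstring appears in the context lines above. A minimal implementation consistent with "recursively sums the numerical values in two nested dictionaries" could look like the sketch below (an assumption, not the file's actual code):

def sum_dicts(dict1, dict2):
    """Recursively sums the numerical values in two nested dictionaries."""
    result = {}
    for key in set(dict1) | set(dict2):
        a, b = dict1.get(key), dict2.get(key)
        if isinstance(a, dict) and isinstance(b, dict):
            result[key] = sum_dicts(a, b)        # recurse into nested dicts
        elif a is None or b is None:
            result[key] = a if b is None else b  # key present on one side only
        else:
            result[key] = a + b                  # numeric leaf: add the values
    return result

Note that realize_metrics runs before each summation, so derived values such as "precision" are summed together with the raw counts and then overwritten when realize_metrics is re-applied to the accumulator.
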
@@ -272,13 +289,13 @@ def sum_dicts(dict1, dict2):
 
 def realize_metrics(metrics_dict,
                     recognition_thresholds):
-    print(metrics_dict)
+
     metrics_dict["precision"] = metrics_dict["tp"]/(metrics_dict["tp"]+metrics_dict["fp"])
     metrics_dict["recall"] = metrics_dict["tp"]/(metrics_dict["tp"]+metrics_dict["fn"])
     metrics_dict["f1"] = 2*metrics_dict["precision"]*metrics_dict["recall"]/(metrics_dict["precision"]+metrics_dict["recall"])
 
     for th in recognition_thresholds:
-        metrics_dict[f"recognition_{th}"] = metrics_dict[f"recognized_{th}"]/metrics_dict["num_gt_ids"]
+        metrics_dict[f"recognition_{th}"] = metrics_dict[f"recognized_{th}"]/metrics_dict["unique_gt_ids"]
 
     return metrics_dict
 
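For a concrete reading of realize_metrics, here is a worked example with illustrative numbers; the key names tp, fp, fn, recognized_{th}, and unique_gt_ids are the ones the function expects, everything else is made up:

metrics = {"tp": 80, "fp": 20, "fn": 40,
           "recognized_0.5": 6, "unique_gt_ids": 10}
metrics = realize_metrics(metrics, recognition_thresholds=[0.5])
# precision       = 80 / (80 + 20)             = 0.800
# recall          = 80 / (80 + 40)             = 0.667 (rounded)
# f1              = 2 * 0.800 * 0.667 / 1.467  = 0.727 (rounded)
# recognition_0.5 = 6 / 10                     = 0.600
# Caveat: f1 divides by precision + recall, which is zero when tp == 0,
# so a model with predictions but no true positives raises ZeroDivisionError.
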
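After this commit, calculate_from_payload returns only the per-sequence dictionary; the metrics_global accumulator is populated but not part of the return value. For a single sequence and model the result is shaped roughly like the sketch below (sequence, model, and filter names are hypothetical):

{
    "seq_01": {                        # hypothetical sequence name
        "my_model": {                  # hypothetical model name
            "all": {
                "tp": 80, "fp": 20, "fn": 40,                    # counts from calculate()
                "precision": 0.8, "recall": 0.667, "f1": 0.727,  # added by realize_metrics()
                "recognition_0.5": 0.6,
            },
            "area": {                  # hypothetical filter
                "small": {},           # same realized-metric keys per range
            },
        },
    },
}

A sequence with no predictions or no ground truth carries the string "Model had no predictions." or "No ground truth." in place of a metrics dict.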