diff --git a/scripts/metrics/compute_overall_miou.py b/scripts/metrics/compute_overall_miou.py deleted file mode 100644 index a015f4b..0000000 --- a/scripts/metrics/compute_overall_miou.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (c) 2022-2024, InterDigital Communications, Inc -# All rights reserved. - -# Redistribution and use in source and binary forms, with or without -# modification, are permitted (subject to the limitations in the disclaimer -# below) provided that the following conditions are met: - -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * Neither the name of InterDigital Communications, Inc nor the names of its -# contributors may be used to endorse or promote products derived from this -# software without specific prior written permission. - -# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY -# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND -# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT -# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -r""" -Compute overall MOT over some sequences outputs - - - -""" - -from __future__ import annotations - -import argparse -import csv -import json - -import compute_overall_mot -import utils - -from compressai_vision.evaluators.evaluators import BaseEvaluator - -CLASSES = ["PANDAM1", "PANDAM2", "PANDAM2"] - -SEQS_BY_CLASS = { - CLASSES[0]: [ - "PANDA057", - "PANDA058", - "PANDA069", - "PANDA070", - "PANDA072", - "PANDA073", - "PANDA077", - ], - CLASSES[1]: [ - "PANDA003", - "PANDA011", - "PANDA016", - "PANDA017", - "PANDA021", - "PANDA023", - "PANDA027", - "PANDA029", - "PANDA030", - "PANDA033", - "PANDA035", - "PANDA037", - "PANDA039", - "PANDA043", - "PANDA053", - "PANDA056", - "PANDA097", - ], - CLASSES[2]: [ - "PANDA088", - "PANDA089", - "PANDA090", - "PANDA095", - "PANDA109", - "PANDA112", - "PANDA113", - "PANDA115", - "PANDA117", - "PANDA119", - "PANDA122", - "PANDA124", - ], -} - - -def compute_overall_mIoU(class_name, items): - miou_acc = 0.0 - for item in items: - with open(item["eval_info"], "r") as f: - results = json.load(f) - miou_acc += results["mIoU"] - - miou_acc = miou_acc / len(items) - - return miou_acc - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "-r", - "--result_path", - required=True, - help="For example, '.../logs/runs/[pipeline]/[codec]/[datacatalog]/' ", - ) - parser.add_argument( - "-q", - "--quality_index", - required=False, - default=-1, - type=int, - help="Provide index of quality folders under the `result_path'. 
quality_index is only meant to point the orderd folders by qp names because there might be different range of qps are used for different sequences", - ) - parser.add_argument( - "-a", - "--all_qualities", - action="store_true", - help="run all 6 rate points in MPEG CTCs", - ) - parser.add_argument( - "-d", - "--dataset_path", - required=True, - help="For example, '.../vcm_testdata/[dataset]' ", - ) - parser.add_argument( - "-c", - "--class_to_compute", - type=str, - choices=CLASSES, - required=True, - ) - - args = parser.parse_args() - if args.all_qualities: - qualities = range(0, 6) - else: - qualities = [args.quality_index] - - with open( - f"{args.result_path}/{args.class_to_compute}.csv", "w", newline="" - ) as file: - writer = csv.writer(file) - for q in qualities: - items = utils.search_items( - args.result_path, - args.dataset_path, - q, - SEQS_BY_CLASS[args.class_to_compute], - BaseEvaluator.get_jde_eval_info_name, - ) - - assert ( - len(items) > 0 - ), "Nothing relevant information found from given directories..." - - summary, names = compute_overall_mot.compute_overall_mota( - args.class_to_compute, items - ) - - motas = [100.0 * sv[13] for sv in summary.values] - - print(f"{'=' * 10} FINAL OVERALL MOTA SUMMARY {'=' * 10}") - print(f"{'-' * 35} : MOTA") - - for key, val in zip(names, motas): - print(f"{str(key):35} : {val:.4f}%") - if key == "Overall": - writer.writerow([str(q), f"{val:.4f}"]) - print("\n") diff --git a/scripts/metrics/compute_overall_map.py b/scripts/metrics/compute_per_class_map.py similarity index 54% rename from scripts/metrics/compute_overall_map.py rename to scripts/metrics/compute_per_class_map.py index e2d57ef..1db0ee8 100644 --- a/scripts/metrics/compute_overall_map.py +++ b/scripts/metrics/compute_per_class_map.py @@ -36,13 +36,9 @@ from __future__ import annotations -import argparse -import csv import json import os -from typing import Any, List - import numpy as np import pandas as pd import utils @@ -52,59 +48,41 @@ from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval -from compressai_vision.evaluators.evaluators import BaseEvaluator - -CLASSES = ["CLASS-AB", "CLASS-C", "CLASS-D", "CLASS-AB*"] - -SEQS_BY_CLASS = { - CLASSES[0]: [ - "Traffic", - "Kimono", - "ParkScene", - "Cactus", - "BasketballDrive", - "BQTerrace", - ], - CLASSES[1]: ["BasketballDrill", "BQMall", "PartyScene", "RaceHorses_832x480"], - CLASSES[2]: ["BasketballPass", "BQSquare", "BlowingBubbles", "RaceHorses"], - CLASSES[3]: ["Traffic", "BQTerrace"], -} - SEQUENCE_TO_OFFSET = { - "Traffic": 10000, - "Kimono": 20000, - "ParkScene": 30000, - "Cactus": 40000, - "BasketballDrive": 50000, - "BQTerrace": 60000, - "BasketballDrill": 70000, - "BQMall": 80000, - "PartyScene": 90000, - "RaceHorses_832x480": 100000, - "BasketballPass": 110000, - "BQSquare": 120000, - "BlowingBubbles": 130000, - "RaceHorses": 140000, + "Traffic_2560x1600_30": 10000, + "Kimono_1920x1080_24": 20000, + "ParkScene_1920x1080_24": 30000, + "Cactus_1920x1080_50": 40000, + "BasketballDrive_1920x1080_50": 50000, + "BQTerrace_1920x1080_60": 60000, + "BasketballDrill_832x480_50": 70000, + "BQMall_832x480_60": 80000, + "PartyScene_832x480_50": 90000, + "RaceHorses_832x480_30": 100000, + "BasketballPass_416x240_50": 110000, + "BQSquare_416x240_60": 120000, + "BlowingBubbles_416x240_50": 130000, + "RaceHorses_416x240_30": 140000, } TMP_EVAL_FILE = "tmp_eval.json" TMP_ANCH_FILE = "tmp_anch.json" +NS_SEQ_PREFIX = "ns_" # Prefix of non-scaled sequences -def compute_overall_mAP(class_name, items, 
no_cactus=False): - seq_root_names = SEQS_BY_CLASS[class_name] - - if no_cactus and class_name == "CLASS-AB": - if "Cactus" in seq_root_names: - seq_root_names.remove("Cactus") +def compute_per_class_mAP(seq_root_names, items): classwise_instances_results = [] classwise_anchor_images = [] classwise_annotation = [] categories = None annotation_id = 0 for e, (item, root_name) in enumerate(zip(items, seq_root_names)): - assert root_name in item[utils.SEQ_NAME_KEY] + assert ( + root_name in item[utils.SEQ_NAME_KEY] + ), f"{root_name} not found in {item[utils.SEQ_NAME_KEY]} ({utils.SEQ_NAME_KEY})" + + root_name = root_name.replace(NS_SEQ_PREFIX, "") seq_img_id_offset = SEQUENCE_TO_OFFSET[root_name] @@ -150,10 +128,6 @@ def compute_overall_mAP(class_name, items, no_cactus=False): os.remove(TMP_EVAL_FILE) os.remove(TMP_ANCH_FILE) - # print("\n") - # print(summary) - # print("\n") - return summary @@ -171,89 +145,9 @@ def coco_evaluation(ann_file, detections): coco_eval.accumulate() coco_eval.summarize() - import logging - - class dummyclass: - def __init__(self): - self._logger = logging.getLogger(__name__) - - # things = [i["name"] for i in coco_eval.cocoGt.cats.values()] - # out_all = COCOEvaluator._derive_coco_results( - # dummyclass(), coco_eval, iou_type="bbox", class_names=things - # ) - headers = ["AP", "AP50", "AP75", "APS", "APM", "APL"] npstat = np.array(coco_eval.stats[:6]) npstat = npstat * 100 # Percent - # npstat = np.around(npstat, 2) data_frame = pd.DataFrame([npstat], columns=headers) return data_frame - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "-r", - "--result_path", - required=True, - help="For example, '.../logs/runs/[pipeline]/[codec]/[datacatalog]/' ", - ) - parser.add_argument( - "-q", - "--quality_index", - required=False, - default=-1, - type=int, - help="Provide index of quality folders under the `result_path'. quality_index is only meant to point the orderd folders by qp names because there might be different range of qps are used for different sequences", - ) - parser.add_argument( - "-a", - "--all_qualities", - action="store_true", - help="run all 6 rate points in MPEG CTCs", - ) - parser.add_argument( - "-d", - "--dataset_path", - required=True, - help="For example, '.../vcm_testdata/[dataset]' ", - ) - parser.add_argument( - "-c", - "--class_to_compute", - type=str, - choices=CLASSES, - required=True, - ) - - args = parser.parse_args() - if args.all_qualities: - qualities = range(0, 6) - else: - qualities = [args.quality_index] - - with open( - f"{args.result_path}/{args.class_to_compute}.csv", "w", newline="" - ) as file: - writer = csv.writer(file) - for q in qualities: - items = utils.search_items( - args.result_path, - args.dataset_path, - q, - SEQS_BY_CLASS[args.class_to_compute], - BaseEvaluator.get_coco_eval_info_name, - ) - - assert ( - len(items) > 0 - ), "Nothing relevant information found from given directories..." - - summary = compute_overall_mAP(args.class_to_compute, items) - - writer.writerow([f"{q}", f"{summary['AP'][0]:.4f}"]) - print(f"{'=' * 10} FINAL OVERALL mAP SUMMARY {'=' * 10}") - print(f"{'-' * 32} AP : {summary['AP'][0]:.4f}") - print("\n\n") diff --git a/scripts/metrics/compute_per_class_miou.py b/scripts/metrics/compute_per_class_miou.py new file mode 100644 index 0000000..cd951f3 --- /dev/null +++ b/scripts/metrics/compute_per_class_miou.py @@ -0,0 +1,51 @@ +# Copyright (c) 2022-2024, InterDigital Communications, Inc +# All rights reserved.
+ +# Redistribution and use in source and binary forms, with or without +# modification, are permitted (subject to the limitations in the disclaimer +# below) provided that the following conditions are met: + +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of InterDigital Communications, Inc nor the names of its +# contributors may be used to endorse or promote products derived from this +# software without specific prior written permission. + +# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY +# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT +# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +r""" +Compute per-class mIoU over a set of sequence outputs + + + +""" + +from __future__ import annotations + +import json + + +def compute_per_class_mIoU(items): + miou_acc = 0.0 + for item in items: + with open(item["eval_info"], "r") as f: + results = json.load(f) + miou_acc += results["mIoU"] + + miou_acc = miou_acc / len(items) + + return miou_acc diff --git a/scripts/metrics/compute_overall_mot.py b/scripts/metrics/compute_per_class_mota.py similarity index 56% rename from scripts/metrics/compute_overall_mot.py rename to scripts/metrics/compute_per_class_mota.py index 5b86b45..974b694 100644 --- a/scripts/metrics/compute_overall_mot.py +++ b/scripts/metrics/compute_per_class_mota.py @@ -36,16 +36,13 @@ from __future__ import annotations -import argparse -import csv - -from typing import Any, Dict, List +from typing import Dict import motmetrics as mm import torch import utils -from compressai_vision.evaluators.evaluators import BaseEvaluator, MOT_JDE_Eval +from compressai_vision.evaluators.evaluators import MOT_JDE_Eval CLASSES = ["TVD", "HIEVE-1080P", "HIEVE-720P"] @@ -78,7 +75,7 @@ def get_accumulator_res_for_hieve(item: Dict): return acc, None, item[utils.SEQ_NAME_KEY] -def compute_overall_mota(class_name, items): +def compute_per_class_mota(class_name, items): get_accumulator_res = { CLASSES[0]: get_accumulator_res_for_tvd, CLASSES[1]: get_accumulator_res_for_hieve, @@ -103,87 +100,4 @@ metrics=mm.metrics.motchallenge_metrics, generate_overall=True, ) - # rendered_summary = mm.io.render_summary( - # summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names - # ) - - # print("\n\n") - # print(rendered_summary) - # print("\n") - - # names.append("Overall") return summary, names - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "-r", - "--result_path", -
required=True, - help="For example, '.../logs/runs/[pipeline]/[codec]/[datacatalog]/' ", - ) - parser.add_argument( - "-q", - "--quality_index", - required=False, - default=-1, - type=int, - help="Provide index of quality folders under the `result_path'. quality_index is only meant to point the orderd folders by qp names because there might be different range of qps are used for different sequences", - ) - parser.add_argument( - "-a", - "--all_qualities", - action="store_true", - help="run all 6 rate points in MPEG CTCs", - ) - parser.add_argument( - "-d", - "--dataset_path", - required=True, - help="For example, '.../vcm_testdata/[dataset]' ", - ) - parser.add_argument( - "-c", - "--class_to_compute", - type=str, - choices=CLASSES, - required=True, - ) - - args = parser.parse_args() - if args.all_qualities: - qualities = range(0, 6) - else: - qualities = [args.quality_index] - - with open( - f"{args.result_path}/{args.class_to_compute}.csv", "w", newline="" - ) as file: - writer = csv.writer(file) - for q in qualities: - items = utils.search_items( - args.result_path, - args.dataset_path, - q, - SEQS_BY_CLASS[args.class_to_compute], - BaseEvaluator.get_jde_eval_info_name, - ) - - assert ( - len(items) > 0 - ), "Nothing relevant information found from given directories..." - - summary, names = compute_overall_mota(args.class_to_compute, items) - - motas = [100.0 * sv[13] for sv in summary.values] - - print(f"{'=' * 10} FINAL OVERALL MOTA SUMMARY {'=' * 10}") - print(f"{'-' * 35} : MOTA") - - for key, val in zip(names, motas): - print(f"{str(key):35} : {val:.4f}%") - if key == "Overall": - writer.writerow([str(q), f"{val:.4f}"]) - print("\n") diff --git a/scripts/metrics/curve_fitting.py b/scripts/metrics/curve_fitting.py index 9fe778d..07327c6 100644 --- a/scripts/metrics/curve_fitting.py +++ b/scripts/metrics/curve_fitting.py @@ -35,7 +35,6 @@ import copy import math -import sys import numpy as np import pandas as pd diff --git a/scripts/metrics/gen_mpeg_cttc_csv.py b/scripts/metrics/gen_mpeg_cttc_csv.py index 88177fe..15c47f5 100644 --- a/scripts/metrics/gen_mpeg_cttc_csv.py +++ b/scripts/metrics/gen_mpeg_cttc_csv.py @@ -44,13 +44,10 @@ import pandas as pd import utils -from compute_overall_map import compute_overall_mAP -from compute_overall_miou import compute_overall_mIoU -from compute_overall_mot import compute_overall_mota -from curve_fitting import ( - convert_to_monotonic_points_SFU, - convert_to_monotonic_points_TVD, -) +from compute_per_class_map import compute_per_class_mAP +from compute_per_class_miou import compute_per_class_mIoU +from compute_per_class_mota import compute_per_class_mota +from curve_fitting import convert_to_monotonic_points_SFU from compressai_vision.datasets import get_seq_info from compressai_vision.evaluators.evaluators import BaseEvaluator @@ -60,32 +57,41 @@ def read_df_rec( path, + dataset_prefix, seq_list, nb_operation_points, fn_regex=r"summary.csv", - prefix: str | None = None, ): - summary_csvs = [f for f in iglob(join(path, "**", fn_regex), recursive=True)] + all_summary_csvs = [f for f in iglob(join(path, "**", fn_regex), recursive=True)] if nb_operation_points > 0: seq_names = [ - file_path.split(path)[1].split("/")[0] for file_path in summary_csvs + file_path.split(path)[1].split("/")[0] for file_path in all_summary_csvs ] unique_seq_names = list(np.unique(seq_names)) for sequence in unique_seq_names: assert ( - len([f for f in summary_csvs if sequence in f]) == nb_operation_points + len([f for f in all_summary_csvs if sequence in f]) + == 
nb_operation_points ), f"Did not find {nb_operation_points} results for {sequence}" + # Only include specified sequences + matched_summary_csvs = [] + for seq in seq_list: + matched = [ + f"{dataset_prefix}{seq}" in summary_csv for summary_csv in all_summary_csvs + ] + found_at_least_one = False + for idx, match in enumerate(matched): + if match: + matched_summary_csvs.append([seq, all_summary_csvs[idx]]) + found_at_least_one = True + assert found_at_least_one, f"Found no summary.csv files for {seq}" + dfs = [] - for f in summary_csvs: + for seq, f in matched_summary_csvs: df = pd.read_csv(f) - - seq_dir = Path(os.path.relpath(f, path)).parts[0] - if prefix and prefix in seq_dir: - df["Dataset"] = df["Dataset"].apply( - lambda x: f"{prefix}{x}" if not x.startswith(f"{prefix}") else x - ) - + # Overwrite the Dataset column to handle the inconsistent name RaceHorsesC_832x480_30 found in summary.csv + df["Dataset"] = seq dfs.append(df) return pd.concat(dfs, ignore_index=True) @@ -96,45 +102,38 @@ def df_append(df1, df2): return out -def generate_classwise_df(result_df, classes: dict): - classwise = pd.DataFrame(columns=result_df.columns) - classwise.drop(columns=["fps", "num_of_coded_frame"], inplace=True) - - for tag, item in classes.items(): - output = compute_class_wise_results(result_df, tag, item) - classwise_df = df_append(classwise, output) +def generate_class_df(result_df, classes: dict): + assert ( + len(classes) == 1 + ), "generate_class_df is expected to be called with a single class entry" - return classwise_df + ((tag, sequences),) = classes.items() + return compute_per_class_results(result_df, tag, sequences) -def compute_class_wise_results(result_df, name, sequences): - samples = None - num_points = prev_num_points = -1 - output = pd.DataFrame(columns=result_df.columns) - output.drop(columns=["fps", "num_of_coded_frame"], inplace=True) +def compute_per_class_results(result_df, name, sequences): + per_sequence_frames = [] + num_points = None for seq in sequences: - d = result_df.loc[(result_df["Dataset"] == seq)] + seq_frames = result_df.loc[result_df["Dataset"] == seq] - if samples is None: - samples = d + if num_points is None: + num_points = seq_frames.shape[0] else: - samples = df_append(samples, d) + assert num_points == seq_frames.shape[0] - if prev_num_points == -1: - num_points = prev_num_points = d.shape[0] - else: - assert prev_num_points == d.shape[0] + per_sequence_frames.append(seq_frames) + samples = pd.concat(per_sequence_frames, ignore_index=True) samples["length"] = samples["num_of_coded_frame"] / samples["fps"] + output = result_df.drop(columns=["fps", "num_of_coded_frame"]).head(0).copy() + for i in range(num_points): - # print(f"Set - {i}") points = samples.iloc[range(i, samples.shape[0], num_points)] total_length = points["length"].sum() - # print(points) - new_row = { output.columns[0]: [ name, @@ -162,20 +161,18 @@ def compute_class_wise_results(result_df, name, sequences): def generate_csv_classwise_video_map( result_path, dataset_path, - list_of_classwise_seq, - seq_list, + dict_of_class_seq, metric="AP", gt_folder="annotations", nb_operation_points: int = 4, - no_cactus: bool = False, skip_classwise: bool = False, seq_prefix: str = None, dataset_prefix: str = None, ): + seq_list = [seq for sequences in dict_of_class_seq.values() for seq in sequences] + opts_metrics = {"AP": 0, "AP50": 1, "AP75": 2, "APS": 3, "APM": 4, "APL": 5} - results_df = read_df_rec( - result_path, seq_list, nb_operation_points, prefix=seq_prefix - ) + results_df = read_df_rec(result_path,
dataset_prefix, seq_list, nb_operation_points) # sort sorterIndex = dict(zip(seq_list, range(len(seq_list)))) @@ -187,32 +184,18 @@ def generate_csv_classwise_video_map( ## drop columns output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True) - if no_cactus: - indices_to_drop = output_df[output_df["Dataset"].str.contains("Cactus")].index - output_df.drop(indices_to_drop, inplace=True) - - for seqs_by_class in list_of_classwise_seq: - classwise_name = list(seqs_by_class.keys())[0] - classwise_seqs = list(seqs_by_class.values())[0] - - cur_seq_prefix = ( - seq_prefix - if seq_prefix - and any(name.startswith(seq_prefix) for name in classwise_seqs) - else None - ) - + for class_name, class_seqs in dict_of_class_seq.items(): class_wise_maps = [] for q in range(nb_operation_points): items = utils.search_items( result_path, dataset_path, q, - classwise_seqs, + class_seqs, BaseEvaluator.get_coco_eval_info_name, by_name=True, gt_folder=gt_folder, - seq_prefix=cur_seq_prefix, + seq_prefix=seq_prefix, dataset_prefix=dataset_prefix, ) @@ -221,22 +204,13 @@ def generate_csv_classwise_video_map( ), "No evaluation information found in provided result directories..." if not skip_classwise: - summary = compute_overall_mAP(classwise_name, items, no_cactus) + summary = compute_per_class_mAP(dict_of_class_seq[class_name], items) maps = summary.values[0][opts_metrics[metric]] class_wise_maps.append(maps) if not skip_classwise and nb_operation_points > 0: - matched_seq_names = [] - for seq_info in items: - name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY]) - matched_seq_names.append( - f"{seq_prefix}{name}" - if seq_prefix and seq_prefix in seq_info[utils.SEQ_NAME_KEY] - else name - ) - - class_wise_results_df = generate_classwise_df( - results_df, {classwise_name: matched_seq_names} + class_wise_results_df = generate_class_df( + results_df, {class_name: class_seqs} ) class_wise_results_df["end_accuracy"] = class_wise_maps @@ -248,16 +222,14 @@ def generate_csv_classwise_video_map( def generate_csv_classwise_video_mota( result_path, dataset_path, - list_of_classwise_seq, + dict_of_class_seq, nb_operation_points: int = 4, + dataset_prefix: str = None, ): - seq_lists = [ - list(class_seq_dict.values())[0] for class_seq_dict in list_of_classwise_seq - ] seq_list = [] - [seq_list.extend(sequences) for sequences in seq_lists] + [seq_list.extend(sequences) for sequences in dict_of_class_seq.values()] - results_df = read_df_rec(result_path, seq_list, nb_operation_points) + results_df = read_df_rec(result_path, dataset_prefix, seq_list, nb_operation_points) results_df = results_df.sort_values(by=["Dataset", "qp"], ascending=[True, True]) # accuracy in % for MPEG template @@ -267,17 +239,14 @@ def generate_csv_classwise_video_mota( ## drop columns output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True) - for seqs_by_class in list_of_classwise_seq: - classwise_name = list(seqs_by_class.keys())[0] - classwise_seqs = list(seqs_by_class.values())[0] - + for class_name, class_seqs in dict_of_class_seq.items(): class_wise_motas = [] for q in range(nb_operation_points): items = utils.search_items( result_path, dataset_path, q, - classwise_seqs, + class_seqs, BaseEvaluator.get_jde_eval_info_name, ) @@ -285,19 +254,14 @@ def generate_csv_classwise_video_mota( len(items) > 0 ), "Nothing relevant information found from given directories..." 
- summary, _ = compute_overall_mota(classwise_name, items) + summary, _ = compute_per_class_mota(class_name, items) mota = summary.values[-1][13] * 100.0 class_wise_motas.append(mota) if nb_operation_points > 0: - matched_seq_names = [] - for seq_info in items: - name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY]) - matched_seq_names.append(name) - - class_wise_results_df = generate_classwise_df( - results_df, {classwise_name: matched_seq_names} + class_wise_results_df = generate_class_df( + results_df, {class_name: class_seqs} ) class_wise_results_df["end_accuracy"] = class_wise_motas @@ -310,11 +274,13 @@ def generate_csv_classwise_video_mota( def generate_csv_classwise_video_miou( result_path, dataset_path, - list_of_classwise_seq, - seq_list, + dict_of_class_seq, nb_operation_points: int = 4, ): - results_df = read_df_rec(result_path, seq_list, nb_operation_points) + seq_list = [] + [seq_list.extend(sequences) for sequences in dict_of_class_seq.values()] + + results_df = read_df_rec(result_path, "", seq_list, nb_operation_points) # sort sorterIndex = dict(zip(seq_list, range(len(seq_list)))) @@ -326,20 +292,14 @@ def generate_csv_classwise_video_miou( ## drop columns output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True) - for seqs_by_class in list_of_classwise_seq: - classwise_name = list(seqs_by_class.keys())[0] - classwise_seqs = [ - seq.replace("PANDA", "") for seq in list(seqs_by_class.values())[0] - ] - + for class_name, class_seqs in dict_of_class_seq.items(): class_wise_mious = [] - # rate_range = [-1] if nb_operation_points == 1 else range(nb_operation_points) for q in range(nb_operation_points): items = utils.search_items( result_path, dataset_path, q, - classwise_seqs, + class_seqs, BaseEvaluator.get_miou_eval_info_name, by_name=True, pandaset_flag=True, @@ -349,7 +309,7 @@ def generate_csv_classwise_video_miou( len(items) > 0 ), "Nothing relevant information found from given directories..." 
- miou = compute_overall_mIoU(classwise_name, items) + miou = compute_per_class_mIoU(class_name, items) class_wise_mious.append(miou) matched_seq_names = [] @@ -357,9 +317,7 @@ def generate_csv_classwise_video_miou( name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY]) matched_seq_names.append(name) - class_wise_results_df = generate_classwise_df( - results_df, {classwise_name: matched_seq_names} - ) + class_wise_results_df = generate_class_df(results_df, {class_name: class_seqs}) class_wise_results_df["end_accuracy"] = class_wise_mious @@ -369,7 +327,7 @@ def generate_csv_classwise_video_miou( def generate_csv(result_path, seq_list, nb_operation_points): - result_df = read_df_rec(result_path, seq_list, nb_operation_points) + result_df = read_df_rec(result_path, "", seq_list, nb_operation_points) # sort result_df = result_df.sort_values(by=["Dataset", "qp"], ascending=[True, True]) @@ -461,93 +419,67 @@ def generate_csv(result_path, seq_list, nb_operation_points): dataset_prefix = "sfu-hw-" class_ab = { "CLASS-AB": [ - "Traffic", - "Kimono", - "ParkScene", - "Cactus", - "BasketballDrive", - "BQTerrace", + "Traffic_2560x1600_30", + "Kimono_1920x1080_24", + "ParkScene_1920x1080_24", + "Cactus_1920x1080_50", + "BasketballDrive_1920x1080_50", + "BQTerrace_1920x1080_60", ] } if args.mode == "VCM": - class_ab["CLASS-AB"].remove("Kimono") - class_ab["CLASS-AB"].remove("Cactus") + class_ab["CLASS-AB"].remove("Kimono_1920x1080_24") + class_ab["CLASS-AB"].remove("Cactus_1920x1080_50") else: assert args.mode == "FCM" if args.no_cactus is True: - class_ab["CLASS-AB"].remove("Cactus") + class_ab["CLASS-AB"].remove("Cactus_1920x1080_50") class_c = { - "CLASS-C": ["BasketballDrill", "BQMall", "PartyScene", "RaceHorses_832x480"] + "CLASS-C": [ + "BasketballDrill_832x480_50", + "BQMall_832x480_60", + "PartyScene_832x480_50", + "RaceHorses_832x480_30", + ] } class_d = { "CLASS-D": [ - "BasketballPass", - "BQSquare", - "BlowingBubbles", - "RaceHorses_416x240", + "BasketballPass_416x240_50", + "BQSquare_416x240_60", + "BlowingBubbles_416x240_50", + "RaceHorses_416x240_30", ] } - classes = [class_ab, class_c, class_d] + classes = {**class_ab, **class_c, **class_d} if args.mode == "VCM" and args.include_optional: class_o = { "CLASS-O": [ - "Kimono", - "Cactus", + "Kimono_1920x1080_24", + "Cactus_1920x1080_50", ] } - classes.append(class_o) - - seq_list = [ - "Traffic_2560x1600_30", - "Kimono_1920x1080_24", - "ParkScene_1920x1080_24", - "Cactus_1920x1080_50", - "BasketballDrive_1920x1080_50", - "BQTerrace_1920x1080_60", - "BasketballDrill_832x480_50", - "BQMall_832x480_60", - "PartyScene_832x480_50", - "RaceHorsesC_832x480_30", - "BasketballPass_416x240_50", - "BQSquare_416x240_60", - "BlowingBubbles_416x240_50", - "RaceHorses_416x240_30", - ] + classes.update(class_o) if args.mode == "FCM" and args.add_non_scale: - ns_seq_list = ["ns_Traffic_2560x1600_30", "ns_BQTerrace_1920x1080_60"] - seq_list.extend(ns_seq_list) - seq_prefix = "ns_" class_ab_star = { "CLASS-AB*": [ - "ns_Traffic", - "ns_BQTerrace", + "ns_Traffic_2560x1600_30", + "ns_BQTerrace_1920x1080_60", ] } - classes.append(class_ab_star) - - if args.mode == "VCM" and not args.include_optional: - seq_list.remove("Kimono_1920x1080_24") - seq_list.remove("Cactus_1920x1080_50") - - if args.mode == "FCM" and args.no_cactus: - seq_list.remove("Cactus_1920x1080_50") + classes.update(class_ab_star) output_df = generate_csv_classwise_video_map( norm_result_path, args.dataset_path, classes, - seq_list, metric, args.gt_folder, args.nb_operation_points, - 
args.no_cactus, args.mode == "VCM", # skip classwise evaluation - seq_prefix=seq_prefix - if "seq_prefix" in locals() - else None, # adding prefix to non-scale sequence - dataset_prefix=dataset_prefix if "dataset_prefix" in locals() else None, + seq_prefix="ns_", + dataset_prefix="sfu-hw-", ) if args.mode == "VCM": @@ -567,8 +499,9 @@ def generate_csv(result_path, seq_list, nb_operation_points): output_df = generate_csv_classwise_video_mota( norm_result_path, args.dataset_path, - [tvd_all], + tvd_all, args.nb_operation_points, + dataset_prefix="mpeg-", ) else: tvd_all = { @@ -607,123 +540,69 @@ def generate_csv(result_path, seq_list, nb_operation_points): ) elif args.dataset_name == "HIEVE": - hieve_1080p = {"HIEVE-1080P": ["mpeg-hieve-13", "mpeg-hieve-16"]} - hieve_720p = {"HIEVE-720P": ["mpeg-hieve-2", "mpeg-hieve-17", "mpeg-hieve-18"]} + hieve = { + "HIEVE-1080P": ["hieve-13", "hieve-16"], + "HIEVE-720P": ["hieve-17", "hieve-18", "hieve-2"], + } output_df = generate_csv_classwise_video_mota( norm_result_path, args.dataset_path, - [hieve_1080p, hieve_720p], + hieve, args.nb_operation_points, + dataset_prefix="mpeg-", ) - # sort for FCM template - comply with the template provided in wg04n00459 - seq_list = [ - "13_1920x1080_30", - "16_1920x1080_30", - "17_1280x720_30", - "18_1280x720_30", - "2_1280x720_30", - "HIEVE-1080P", - "HIEVE-720", - ] - sorterIndex = dict(zip(seq_list, range(len(seq_list)))) - output_df["ds_rank"] = output_df["Dataset"].map(sorterIndex) - output_df.sort_values(["ds_rank", "qp"], ascending=[True, True], inplace=True) - output_df.drop(columns=["ds_rank"], inplace=True) elif args.dataset_name == "PANDASET": - PANDAM1 = { + pandaset = { "PANDAM1": [ - "PANDA057", - "PANDA058", - "PANDA069", - "PANDA070", - "PANDA072", - "PANDA073", - "PANDA077", - ] - } - PANDAM2 = { + "057", + "058", + "069", + "070", + "072", + "073", + "077", + ], "PANDAM2": [ - "PANDA003", - "PANDA011", - "PANDA016", - "PANDA017", - "PANDA021", - "PANDA023", - "PANDA027", - "PANDA029", - "PANDA030", - "PANDA033", - "PANDA035", - "PANDA037", - "PANDA039", - "PANDA043", - "PANDA053", - "PANDA056", - "PANDA097", - ] - } - PANDAM3 = { + "003", + "011", + "016", + "017", + "021", + "023", + "027", + "029", + "030", + "033", + "035", + "037", + "039", + "043", + "053", + "056", + "097", + ], "PANDAM3": [ - "PANDA088", - "PANDA089", - "PANDA090", - "PANDA095", - "PANDA109", - "PANDA112", - "PANDA113", - "PANDA115", - "PANDA117", - "PANDA119", - "PANDA122", - "PANDA124", - ] + "088", + "089", + "090", + "095", + "109", + "112", + "113", + "115", + "117", + "119", + "122", + "124", + ], } - seq_list = [ - "PANDA057", - "PANDA058", - "PANDA069", - "PANDA070", - "PANDA072", - "PANDA073", - "PANDA077", - "PANDA003", - "PANDA011", - "PANDA016", - "PANDA017", - "PANDA021", - "PANDA023", - "PANDA027", - "PANDA029", - "PANDA030", - "PANDA033", - "PANDA035", - "PANDA037", - "PANDA039", - "PANDA043", - "PANDA053", - "PANDA056", - "PANDA097", - "PANDA088", - "PANDA089", - "PANDA090", - "PANDA095", - "PANDA109", - "PANDA112", - "PANDA113", - "PANDA115", - "PANDA117", - "PANDA119", - "PANDA122", - "PANDA124", - ] - seq_list = [s[-3:] + "_1920x1080_30" for s in seq_list] output_df = generate_csv_classwise_video_miou( norm_result_path, args.dataset_path, - [PANDAM1, PANDAM2, PANDAM3], - seq_list, + pandaset, args.nb_operation_points, + dataset_prefix="pandaset-", ) else: raise NotImplementedError diff --git a/scripts/metrics/utils.py b/scripts/metrics/utils.py index 8cc83ea..9b68180 100644 --- 
a/scripts/metrics/utils.py +++ b/scripts/metrics/utils.py @@ -40,12 +40,9 @@ import re from pathlib import Path -from typing import Dict, Optional __all__ = [ "get_seq_number", - # "get_eval_info_path", - # "get_seq_info_path", ] SEQ_NAME_KEY = "seq_name"
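
Usage note (not part of the patch): with the per-file __main__ entry points removed, gen_mpeg_cttc_csv.py becomes the only driver for the renamed per-class helpers. Below is a minimal sketch of invoking the mIoU helper directly, mirroring the utils.search_items() call made in generate_csv_classwise_video_miou() above; the two paths and the number of rate points are placeholders, and the PANDAM1 sequence list is copied from the pandaset dict in gen_mpeg_cttc_csv.py.

    # Illustrative sketch only; paths below are placeholders.
    import utils

    from compute_per_class_miou import compute_per_class_mIoU
    from compressai_vision.evaluators.evaluators import BaseEvaluator

    # PANDAM1 sequence numbers, as listed in gen_mpeg_cttc_csv.py
    PANDAM1 = ["057", "058", "069", "070", "072", "073", "077"]

    for q in range(4):  # one iteration per operation point
        # Same arguments as the generate_csv_classwise_video_miou() call site
        items = utils.search_items(
            "/path/to/logs/runs/",     # result_path (placeholder)
            "/path/to/vcm_testdata/",  # dataset_path (placeholder)
            q,
            PANDAM1,
            BaseEvaluator.get_miou_eval_info_name,
            by_name=True,
            pandaset_flag=True,
        )
        assert len(items) > 0, "No evaluation information found"
        print(f"q={q}: mIoU = {compute_per_class_mIoU(items):.4f}")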