From 307be81cfe16c37025be04bb5bb409b09c9ef490 Mon Sep 17 00:00:00 2001
From: Chris Rosewarne
Date: Wed, 19 Nov 2025 14:30:48 +1100
Subject: [PATCH 1/5] [chore] Enforce metrics output as per dataset dict

---
 scripts/metrics/compute_overall_map.py |  60 +++--
 scripts/metrics/gen_mpeg_cttc_csv.py   | 341 +++++++++----------------
 2 files changed, 147 insertions(+), 254 deletions(-)

diff --git a/scripts/metrics/compute_overall_map.py b/scripts/metrics/compute_overall_map.py
index e2d57ef..ff10c8c 100644
--- a/scripts/metrics/compute_overall_map.py
+++ b/scripts/metrics/compute_overall_map.py
@@ -58,45 +58,41 @@
 SEQS_BY_CLASS = {
     CLASSES[0]: [
-        "Traffic",
-        "Kimono",
-        "ParkScene",
-        "Cactus",
-        "BasketballDrive",
-        "BQTerrace",
+        "Traffic_2560x1600_30",
+        "Kimono_1920x1080_24",
+        "ParkScene_1920x1080_24",
+        "Cactus_1920x1080_50",
+        "BasketballDrive_1920x1080_50",
+        "BQTerrace_1920x1080_60",
     ],
-    CLASSES[1]: ["BasketballDrill", "BQMall", "PartyScene", "RaceHorses_832x480"],
-    CLASSES[2]: ["BasketballPass", "BQSquare", "BlowingBubbles", "RaceHorses"],
-    CLASSES[3]: ["Traffic", "BQTerrace"],
+    CLASSES[1]: ["BasketballDrill_832x480_50", "BQMall_832x480_60", "PartyScene_832x480_50", "RaceHorses_832x480_30"],
+    CLASSES[2]: ["BasketballPass_416x240_50", "BQSquare_416x240_60", "BlowingBubbles_416x240_50", "RaceHorses_416x240_30"],
+    CLASSES[3]: ["ns_Traffic_2560x1600_30", "ns_BQTerrace_1920x1080_60"],
 }
 
 SEQUENCE_TO_OFFSET = {
-    "Traffic": 10000,
-    "Kimono": 20000,
-    "ParkScene": 30000,
-    "Cactus": 40000,
-    "BasketballDrive": 50000,
-    "BQTerrace": 60000,
-    "BasketballDrill": 70000,
-    "BQMall": 80000,
-    "PartyScene": 90000,
-    "RaceHorses_832x480": 100000,
-    "BasketballPass": 110000,
-    "BQSquare": 120000,
-    "BlowingBubbles": 130000,
-    "RaceHorses": 140000,
+    "Traffic_2560x1600_30": 10000,
+    "Kimono_1920x1080_24": 20000,
+    "ParkScene_1920x1080_24": 30000,
+    "Cactus_1920x1080_50": 40000,
+    "BasketballDrive_1920x1080_50": 50000,
+    "BQTerrace_1920x1080_60": 60000,
+    "BasketballDrill_832x480_50": 70000,
+    "BQMall_832x480_60": 80000,
+    "PartyScene_832x480_50": 90000,
+    "RaceHorses_832x480_30": 100000,
+    "BasketballPass_416x240_50": 110000,
+    "BQSquare_416x240_60": 120000,
+    "BlowingBubbles_416x240_50": 130000,
+    "RaceHorses_416x240_30": 140000,
 }
 
 TMP_EVAL_FILE = "tmp_eval.json"
 TMP_ANCH_FILE = "tmp_anch.json"
+NS_SEQ_PREFIX = "ns_" # Prefix of non-scaled sequences
 
 
-def compute_overall_mAP(class_name, items, no_cactus=False):
-    seq_root_names = SEQS_BY_CLASS[class_name]
-
-    if no_cactus and class_name == "CLASS-AB":
-        if "Cactus" in seq_root_names:
-            seq_root_names.remove("Cactus")
+def compute_overall_mAP(seq_root_names, items): 
     classwise_instances_results = []
     classwise_anchor_images = []
@@ -104,7 +100,9 @@
     categories = None
     annotation_id = 0
     for e, (item, root_name) in enumerate(zip(items, seq_root_names)):
-        assert root_name in item[utils.SEQ_NAME_KEY]
+        assert root_name in item[utils.SEQ_NAME_KEY], f"Not found {root_name} in {item[utils.SEQ_NAME_KEY]} {utils.SEQ_NAME_KEY}"
+
+        root_name = root_name.replace(NS_SEQ_PREFIX, "")
 
         seq_img_id_offset = SEQUENCE_TO_OFFSET[root_name]
 
@@ -251,7 +249,7 @@ def __init__(self):
             len(items) > 0
         ), "Nothing relevant information found from given directories..."
 
-        summary = compute_overall_mAP(args.class_to_compute, items)
+        summary = compute_overall_mAP(SEQS_BY_CLASS[args.class_to_compute], items)
 
         writer.writerow([f"{q}", f"{summary['AP'][0]:.4f}"])
         print(f"{'=' * 10} FINAL OVERALL mAP SUMMARY {'=' * 10}")
diff --git a/scripts/metrics/gen_mpeg_cttc_csv.py b/scripts/metrics/gen_mpeg_cttc_csv.py
index 88177fe..95b4db1 100644
--- a/scripts/metrics/gen_mpeg_cttc_csv.py
+++ b/scripts/metrics/gen_mpeg_cttc_csv.py
@@ -60,32 +60,38 @@
 def read_df_rec(
     path,
+    dataset_prefix,
     seq_list,
     nb_operation_points,
     fn_regex=r"summary.csv",
-    prefix: str | None = None,
 ):
-    summary_csvs = [f for f in iglob(join(path, "**", fn_regex), recursive=True)]
+    all_summary_csvs = [f for f in iglob(join(path, "**", fn_regex), recursive=True)]
 
     if nb_operation_points > 0:
         seq_names = [
-            file_path.split(path)[1].split("/")[0] for file_path in summary_csvs
+            file_path.split(path)[1].split("/")[0] for file_path in all_summary_csvs
         ]
         unique_seq_names = list(np.unique(seq_names))
         for sequence in unique_seq_names:
             assert (
-                len([f for f in summary_csvs if sequence in f]) == nb_operation_points
+                len([f for f in all_summary_csvs if sequence in f]) == nb_operation_points
             ), f"Did not find {nb_operation_points} results for {sequence}"
 
+    # Only include specified sequences
+    matched_summary_csvs = []
+    for seq in seq_list:
+        matched = [f"{dataset_prefix}{seq}" in summary_csv for summary_csv in all_summary_csvs]
+        found_at_least_one = False
+        for idx, match in enumerate(matched):
+            if match:
+                matched_summary_csvs.append([seq, all_summary_csvs[idx]])
+                found_at_least_one = True
+        assert found_at_least_one, f"Found no summary.csv files for {seq}"
+
     dfs = []
-    for f in summary_csvs:
+    for seq, f in matched_summary_csvs:
         df = pd.read_csv(f)
-
-        seq_dir = Path(os.path.relpath(f, path)).parts[0]
-        if prefix and prefix in seq_dir:
-            df["Dataset"] = df["Dataset"].apply(
-                lambda x: f"{prefix}{x}" if not x.startswith(f"{prefix}") else x
-            )
-
+        # Overwrite in dataframe to handle inconsistent name RaceHorsesC_832x480_30 found in summary.csv
+        df["Dataset"] = seq
         dfs.append(df)
 
     return pd.concat(dfs, ignore_index=True)
@@ -162,19 +168,20 @@ def compute_class_wise_results(result_df, name, sequences):
 def generate_csv_classwise_video_map(
     result_path,
     dataset_path,
-    list_of_classwise_seq,
-    seq_list,
+    dict_of_classwise_seq,
     metric="AP",
     gt_folder="annotations",
     nb_operation_points: int = 4,
-    no_cactus: bool = False,
     skip_classwise: bool = False,
     seq_prefix: str = None,
     dataset_prefix: str = None,
 ):
+    seq_list = []
+    [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()]
+
     opts_metrics = {"AP": 0, "AP50": 1, "AP75": 2, "APS": 3, "APM": 4, "APL": 5}
     results_df = read_df_rec(
-        result_path, seq_list, nb_operation_points, prefix=seq_prefix
+        result_path, dataset_prefix, seq_list, nb_operation_points
     )
 
     # sort
@@ -187,20 +194,7 @@
     ## drop columns
     output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
-    if no_cactus:
-        indices_to_drop = output_df[output_df["Dataset"].str.contains("Cactus")].index
-        output_df.drop(indices_to_drop, inplace=True)
-
-    for seqs_by_class in list_of_classwise_seq:
-        classwise_name = list(seqs_by_class.keys())[0]
-        classwise_seqs = list(seqs_by_class.values())[0]
-
-        cur_seq_prefix = (
-            seq_prefix
-            if seq_prefix
-            and any(name.startswith(seq_prefix) for name in classwise_seqs)
-            else None
-        )
+    for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
 
         class_wise_maps = []
         for q in range(nb_operation_points):
@@ -212,7 +206,7 @@
                 BaseEvaluator.get_coco_eval_info_name,
                 by_name=True,
                 gt_folder=gt_folder,
-                seq_prefix=cur_seq_prefix,
+                seq_prefix=seq_prefix,
                 dataset_prefix=dataset_prefix,
             )
 
@@ -221,22 +215,13 @@
             ), "No evaluation information found in provided result directories..."
 
             if not skip_classwise:
-                summary = compute_overall_mAP(classwise_name, items, no_cactus)
+                summary = compute_overall_mAP(dict_of_classwise_seq[classwise_name], items)
                 maps = summary.values[0][opts_metrics[metric]]
                 class_wise_maps.append(maps)
 
         if not skip_classwise and nb_operation_points > 0:
-            matched_seq_names = []
-            for seq_info in items:
-                name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY])
-                matched_seq_names.append(
-                    f"{seq_prefix}{name}"
-                    if seq_prefix and seq_prefix in seq_info[utils.SEQ_NAME_KEY]
-                    else name
-                )
-
             class_wise_results_df = generate_classwise_df(
-                results_df, {classwise_name: matched_seq_names}
+                results_df, {classwise_name: classwise_seqs}
             )
             class_wise_results_df["end_accuracy"] = class_wise_maps
 
@@ -248,16 +233,16 @@
 def generate_csv_classwise_video_mota(
     result_path,
     dataset_path,
-    list_of_classwise_seq,
+    dict_of_classwise_seq,
     nb_operation_points: int = 4,
+    dataset_prefix: str = None,
 ):
-    seq_lists = [
-        list(class_seq_dict.values())[0] for class_seq_dict in list_of_classwise_seq
-    ]
     seq_list = []
-    [seq_list.extend(sequences) for sequences in seq_lists]
+    [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()]
 
-    results_df = read_df_rec(result_path, seq_list, nb_operation_points)
+    results_df = read_df_rec(
+        result_path, dataset_prefix, seq_list, nb_operation_points
+    )
     results_df = results_df.sort_values(by=["Dataset", "qp"], ascending=[True, True])
 
     # accuracy in % for MPEG template
@@ -267,9 +252,7 @@
     ## drop columns
     output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
-    for seqs_by_class in list_of_classwise_seq:
-        classwise_name = list(seqs_by_class.keys())[0]
-        classwise_seqs = list(seqs_by_class.values())[0]
+    for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
 
         class_wise_motas = []
         for q in range(nb_operation_points):
@@ -291,13 +274,8 @@
             class_wise_motas.append(mota)
 
         if nb_operation_points > 0:
-            matched_seq_names = []
-            for seq_info in items:
-                name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY])
-                matched_seq_names.append(name)
-
             class_wise_results_df = generate_classwise_df(
-                results_df, {classwise_name: matched_seq_names}
+                results_df, {classwise_name: classwise_seqs}
             )
             class_wise_results_df["end_accuracy"] = class_wise_motas
 
@@ -310,11 +288,16 @@
 def generate_csv_classwise_video_miou(
     result_path,
     dataset_path,
-    list_of_classwise_seq,
-    seq_list,
+    dict_of_classwise_seq,
     nb_operation_points: int = 4,
+    dataset_prefix : str = None,
 ):
-    results_df = read_df_rec(result_path, seq_list, nb_operation_points)
+    seq_list = []
+    [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()]
+
+    results_df = read_df_rec(
+        result_path, "", seq_list, nb_operation_points
+    )
 
     # sort
     sorterIndex = dict(zip(seq_list, range(len(seq_list))))
@@ -326,11 +309,7 @@
     ## drop columns
     output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
-    for seqs_by_class in list_of_classwise_seq:
-        classwise_name = list(seqs_by_class.keys())[0]
-        classwise_seqs = [
-            seq.replace("PANDA", "") for seq in list(seqs_by_class.values())[0]
-        ]
+    for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
 
         class_wise_mious = []
         # rate_range = [-1] if nb_operation_points == 1 else range(nb_operation_points)
@@ -358,7 +337,7 @@
             matched_seq_names.append(name)
 
         class_wise_results_df = generate_classwise_df(
-            results_df, {classwise_name: matched_seq_names}
+            results_df, {classwise_name: classwise_seqs}
         )
         class_wise_results_df["end_accuracy"] = class_wise_mious
 
@@ -369,7 +348,7 @@
 
 def generate_csv(result_path, seq_list, nb_operation_points):
-    result_df = read_df_rec(result_path, seq_list, nb_operation_points)
+    result_df = read_df_rec(result_path, "", seq_list, nb_operation_points)
 
     # sort
     result_df = result_df.sort_values(by=["Dataset", "qp"], ascending=[True, True])
@@ -461,93 +440,62 @@ def generate_csv(result_path, seq_list, nb_operation_points):
         dataset_prefix = "sfu-hw-"
         class_ab = {
             "CLASS-AB": [
-                "Traffic",
-                "Kimono",
-                "ParkScene",
-                "Cactus",
-                "BasketballDrive",
-                "BQTerrace",
+                "Traffic_2560x1600_30",
+                "Kimono_1920x1080_24",
+                "ParkScene_1920x1080_24",
+                "Cactus_1920x1080_50",
+                "BasketballDrive_1920x1080_50",
+                "BQTerrace_1920x1080_60",
             ]
         }
         if args.mode == "VCM":
-            class_ab["CLASS-AB"].remove("Kimono")
-            class_ab["CLASS-AB"].remove("Cactus")
+            class_ab["CLASS-AB"].remove("Kimono_1920x1080_24")
+            class_ab["CLASS-AB"].remove("Cactus_1920x1080_50")
         else:
             assert args.mode == "FCM"
 
         if args.no_cactus is True:
-            class_ab["CLASS-AB"].remove("Cactus")
+            class_ab["CLASS-AB"].remove("Cactus_1920x1080_50")
 
         class_c = {
-            "CLASS-C": ["BasketballDrill", "BQMall", "PartyScene", "RaceHorses_832x480"]
+            "CLASS-C": ["BasketballDrill_832x480_50", "BQMall_832x480_60", "PartyScene_832x480_50", "RaceHorses_832x480_30"]
         }
         class_d = {
             "CLASS-D": [
-                "BasketballPass",
-                "BQSquare",
-                "BlowingBubbles",
-                "RaceHorses_416x240",
+                "BasketballPass_416x240_50",
+                "BQSquare_416x240_60",
+                "BlowingBubbles_416x240_50",
+                "RaceHorses_416x240_30",
             ]
         }
-        classes = [class_ab, class_c, class_d]
+        classes = {**class_ab, **class_c, **class_d}
 
         if args.mode == "VCM" and args.include_optional:
             class_o = {
                 "CLASS-O": [
-                    "Kimono",
-                    "Cactus",
+                    "Kimono_1920x1080_24",
+                    "Cactus_1920x1080_50",
                 ]
             }
-            classes.append(class_o)
-
-        seq_list = [
-            "Traffic_2560x1600_30",
-            "Kimono_1920x1080_24",
-            "ParkScene_1920x1080_24",
-            "Cactus_1920x1080_50",
-            "BasketballDrive_1920x1080_50",
-            "BQTerrace_1920x1080_60",
-            "BasketballDrill_832x480_50",
-            "BQMall_832x480_60",
-            "PartyScene_832x480_50",
-            "RaceHorsesC_832x480_30",
-            "BasketballPass_416x240_50",
-            "BQSquare_416x240_60",
-            "BlowingBubbles_416x240_50",
-            "RaceHorses_416x240_30",
-        ]
+            classes.update(class_o)
 
         if args.mode == "FCM" and args.add_non_scale:
-            ns_seq_list = ["ns_Traffic_2560x1600_30", "ns_BQTerrace_1920x1080_60"]
-            seq_list.extend(ns_seq_list)
-            seq_prefix = "ns_"
            class_ab_star = {
                 "CLASS-AB*": [
-                    "ns_Traffic",
-                    "ns_BQTerrace",
+                    "ns_Traffic_2560x1600_30",
+                    "ns_BQTerrace_1920x1080_60",
                 ]
             }
-            classes.append(class_ab_star)
-
-        if args.mode == "VCM" and not args.include_optional:
-            seq_list.remove("Kimono_1920x1080_24")
-            seq_list.remove("Cactus_1920x1080_50")
-
-        if args.mode == "FCM" and args.no_cactus:
-            seq_list.remove("Cactus_1920x1080_50")
+            classes.update(class_ab_star)
 
         output_df = generate_csv_classwise_video_map(
             norm_result_path,
             args.dataset_path,
             classes,
-            seq_list,
             metric,
             args.gt_folder,
             args.nb_operation_points,
-            args.no_cactus,
             args.mode == "VCM",  # skip classwise evaluation
-            seq_prefix=seq_prefix
-            if "seq_prefix" in locals()
-            else None,  # adding prefix to non-scale sequence
-            dataset_prefix=dataset_prefix if "dataset_prefix" in locals() else None,
+            seq_prefix="ns_",
+            dataset_prefix="sfu-hw-",
         )
 
         if args.mode == "VCM":
@@ -567,8 +515,9 @@ def generate_csv(result_path, seq_list, nb_operation_points):
             output_df = generate_csv_classwise_video_mota(
                 norm_result_path,
                 args.dataset_path,
-                [tvd_all],
+                tvd_all,
                 args.nb_operation_points,
+                dataset_prefix="mpeg-",
             )
         else:
             tvd_all = {
@@ -607,123 +556,69 @@ def generate_csv(result_path, seq_list, nb_operation_points):
         )
 
     elif args.dataset_name == "HIEVE":
-        hieve_1080p = {"HIEVE-1080P": ["mpeg-hieve-13", "mpeg-hieve-16"]}
-        hieve_720p = {"HIEVE-720P": ["mpeg-hieve-2", "mpeg-hieve-17", "mpeg-hieve-18"]}
+        hieve = {
+            "HIEVE-1080P": ["hieve-13", "hieve-16"],
+            "HIEVE-720P": ["hieve-17", "hieve-18", "hieve-2"]
+        }
         output_df = generate_csv_classwise_video_mota(
             norm_result_path,
             args.dataset_path,
-            [hieve_1080p, hieve_720p],
+            hieve,
             args.nb_operation_points,
+            dataset_prefix="mpeg-",
         )
-        # sort for FCM template - comply with the template provided in wg04n00459
-        seq_list = [
-            "13_1920x1080_30",
-            "16_1920x1080_30",
-            "17_1280x720_30",
-            "18_1280x720_30",
-            "2_1280x720_30",
-            "HIEVE-1080P",
-            "HIEVE-720",
-        ]
-        sorterIndex = dict(zip(seq_list, range(len(seq_list))))
-        output_df["ds_rank"] = output_df["Dataset"].map(sorterIndex)
-        output_df.sort_values(["ds_rank", "qp"], ascending=[True, True], inplace=True)
-        output_df.drop(columns=["ds_rank"], inplace=True)
 
     elif args.dataset_name == "PANDASET":
-        PANDAM1 = {
+        pandaset = {
             "PANDAM1": [
-                "PANDA057",
-                "PANDA058",
-                "PANDA069",
-                "PANDA070",
-                "PANDA072",
-                "PANDA073",
-                "PANDA077",
-            ]
-        }
-        PANDAM2 = {
+                "057",
+                "058",
+                "069",
+                "070",
+                "072",
+                "073",
+                "077",
+            ],
             "PANDAM2": [
-                "PANDA003",
-                "PANDA011",
-                "PANDA016",
-                "PANDA017",
-                "PANDA021",
-                "PANDA023",
-                "PANDA027",
-                "PANDA029",
-                "PANDA030",
-                "PANDA033",
-                "PANDA035",
-                "PANDA037",
-                "PANDA039",
-                "PANDA043",
-                "PANDA053",
-                "PANDA056",
-                "PANDA097",
-            ]
-        }
-        PANDAM3 = {
+                "003",
+                "011",
+                "016",
+                "017",
+                "021",
+                "023",
+                "027",
+                "029",
+                "030",
+                "033",
+                "035",
+                "037",
+                "039",
+                "043",
+                "053",
+                "056",
+                "097",
+            ],
             "PANDAM3": [
-                "PANDA088",
-                "PANDA089",
-                "PANDA090",
-                "PANDA095",
-                "PANDA109",
-                "PANDA112",
-                "PANDA113",
-                "PANDA115",
-                "PANDA117",
-                "PANDA119",
-                "PANDA122",
-                "PANDA124",
+                "088",
+                "089",
+                "090",
+                "095",
+                "109",
+                "112",
+                "113",
+                "115",
+                "117",
+                "119",
+                "122",
+                "124",
             ]
         }
-        seq_list = [
-            "PANDA057",
-            "PANDA058",
-            "PANDA069",
-            "PANDA070",
-            "PANDA072",
-            "PANDA073",
-            "PANDA077",
-            "PANDA003",
-            "PANDA011",
-            "PANDA016",
-            "PANDA017",
-            "PANDA021",
-            "PANDA023",
-            "PANDA027",
-            "PANDA029",
-            "PANDA030",
-            "PANDA033",
-            "PANDA035",
-            "PANDA037",
-            "PANDA039",
-            "PANDA043",
-            "PANDA053",
-            "PANDA056",
-            "PANDA097",
-            "PANDA088",
-            "PANDA089",
-            "PANDA090",
-            "PANDA095",
-            "PANDA109",
-            "PANDA112",
-            "PANDA113",
-            "PANDA115",
-            "PANDA117",
-            "PANDA119",
-            "PANDA122",
-            "PANDA124",
-        ]
-        seq_list = [s[-3:] + "_1920x1080_30" for s in seq_list]
 
         output_df = generate_csv_classwise_video_miou(
             norm_result_path,
             args.dataset_path,
-            [PANDAM1, PANDAM2, PANDAM3],
-            seq_list,
+            pandaset,
             args.nb_operation_points,
+            dataset_prefix="pandaset-",
         )
     else:
         raise NotImplementedError

From 6898a12a2b0a67a27fff031b1e79f673453c94a8 Mon Sep 17 00:00:00 2001
From: Fabien Racape
Date: Wed, 19 Nov 2025 22:54:15 -0800
Subject: [PATCH 2/5] fix: formatting

---
 scripts/metrics/compute_overall_map.py | 22 +++++++++++----
 scripts/metrics/gen_mpeg_cttc_csv.py   | 39 +++++++++++++-------------
 2 files changed, 37 insertions(+), 24 deletions(-)

diff --git a/scripts/metrics/compute_overall_map.py b/scripts/metrics/compute_overall_map.py
index ff10c8c..369bb94 100644
--- a/scripts/metrics/compute_overall_map.py
+++ b/scripts/metrics/compute_overall_map.py
@@ -65,8 +65,18 @@
         "BasketballDrive_1920x1080_50",
         "BQTerrace_1920x1080_60",
     ],
-    CLASSES[1]: ["BasketballDrill_832x480_50", "BQMall_832x480_60", "PartyScene_832x480_50", "RaceHorses_832x480_30"],
-    CLASSES[2]: ["BasketballPass_416x240_50", "BQSquare_416x240_60", "BlowingBubbles_416x240_50", "RaceHorses_416x240_30"],
+    CLASSES[1]: [
+        "BasketballDrill_832x480_50",
+        "BQMall_832x480_60",
+        "PartyScene_832x480_50",
+        "RaceHorses_832x480_30",
+    ],
+    CLASSES[2]: [
+        "BasketballPass_416x240_50",
+        "BQSquare_416x240_60",
+        "BlowingBubbles_416x240_50",
+        "RaceHorses_416x240_30",
+    ],
     CLASSES[3]: ["ns_Traffic_2560x1600_30", "ns_BQTerrace_1920x1080_60"],
 }
 
@@ -90,17 +100,19 @@
 
 TMP_EVAL_FILE = "tmp_eval.json"
 TMP_ANCH_FILE = "tmp_anch.json"
-NS_SEQ_PREFIX = "ns_" # Prefix of non-scaled sequences
+NS_SEQ_PREFIX = "ns_"  # Prefix of non-scaled sequences
 
 
-def compute_overall_mAP(seq_root_names, items): 
+def compute_overall_mAP(seq_root_names, items):
     classwise_instances_results = []
     classwise_anchor_images = []
     classwise_annotation = []
     categories = None
     annotation_id = 0
     for e, (item, root_name) in enumerate(zip(items, seq_root_names)):
-        assert root_name in item[utils.SEQ_NAME_KEY], f"Not found {root_name} in {item[utils.SEQ_NAME_KEY]} {utils.SEQ_NAME_KEY}"
+        assert (
+            root_name in item[utils.SEQ_NAME_KEY]
+        ), f"Not found {root_name} in {item[utils.SEQ_NAME_KEY]} {utils.SEQ_NAME_KEY}"
 
         root_name = root_name.replace(NS_SEQ_PREFIX, "")
 
diff --git a/scripts/metrics/gen_mpeg_cttc_csv.py b/scripts/metrics/gen_mpeg_cttc_csv.py
index 95b4db1..29e40fe 100644
--- a/scripts/metrics/gen_mpeg_cttc_csv.py
+++ b/scripts/metrics/gen_mpeg_cttc_csv.py
@@ -73,13 +73,16 @@ def read_df_rec(
         unique_seq_names = list(np.unique(seq_names))
         for sequence in unique_seq_names:
             assert (
-                len([f for f in all_summary_csvs if sequence in f]) == nb_operation_points
+                len([f for f in all_summary_csvs if sequence in f])
+                == nb_operation_points
             ), f"Did not find {nb_operation_points} results for {sequence}"
 
     # Only include specified sequences
     matched_summary_csvs = []
     for seq in seq_list:
-        matched = [f"{dataset_prefix}{seq}" in summary_csv for summary_csv in all_summary_csvs]
+        matched = [
+            f"{dataset_prefix}{seq}" in summary_csv for summary_csv in all_summary_csvs
+        ]
         found_at_least_one = False
         for idx, match in enumerate(matched):
             if match:
@@ -180,9 +183,7 @@
     [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()]
 
     opts_metrics = {"AP": 0, "AP50": 1, "AP75": 2, "APS": 3, "APM": 4, "APL": 5}
-    results_df = read_df_rec(
-        result_path, dataset_prefix, seq_list, nb_operation_points
-    )
+    results_df = read_df_rec(result_path, dataset_prefix, seq_list, nb_operation_points)
 
     # sort
     sorterIndex = dict(zip(seq_list, range(len(seq_list))))
@@ -195,7 +196,6 @@
     output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
     for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
-
         class_wise_maps = []
         for q in range(nb_operation_points):
             items = utils.search_items(
@@ -215,7 +215,9 @@ def generate_csv_classwise_video_map(
             ), "No evaluation information found in provided result directories..."
 
             if not skip_classwise:
-                summary = compute_overall_mAP(dict_of_classwise_seq[classwise_name], items)
+                summary = compute_overall_mAP(
+                    dict_of_classwise_seq[classwise_name], items
+                )
                 maps = summary.values[0][opts_metrics[metric]]
                 class_wise_maps.append(maps)
 
@@ -240,9 +242,7 @@ def generate_csv_classwise_video_mota(
     seq_list = []
     [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()]
 
-    results_df = read_df_rec(
-        result_path, dataset_prefix, seq_list, nb_operation_points
-    )
+    results_df = read_df_rec(result_path, dataset_prefix, seq_list, nb_operation_points)
     results_df = results_df.sort_values(by=["Dataset", "qp"], ascending=[True, True])
 
     # accuracy in % for MPEG template
@@ -253,7 +253,6 @@
     output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
     for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
-
         class_wise_motas = []
         for q in range(nb_operation_points):
             items = utils.search_items(
@@ -290,14 +289,12 @@ def generate_csv_classwise_video_miou(
     dataset_path,
     dict_of_classwise_seq,
     nb_operation_points: int = 4,
-    dataset_prefix : str = None,
+    dataset_prefix: str = None,
 ):
     seq_list = []
     [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()]
 
-    results_df = read_df_rec(
-        result_path, "", seq_list, nb_operation_points
-    )
+    results_df = read_df_rec(result_path, "", seq_list, nb_operation_points)
 
     # sort
     sorterIndex = dict(zip(seq_list, range(len(seq_list))))
@@ -310,7 +307,6 @@
     output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
     for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
-
         class_wise_mious = []
         # rate_range = [-1] if nb_operation_points == 1 else range(nb_operation_points)
         for q in range(nb_operation_points):
@@ -457,7 +453,12 @@ def generate_csv(result_path, seq_list, nb_operation_points):
             class_ab["CLASS-AB"].remove("Cactus_1920x1080_50")
 
         class_c = {
-            "CLASS-C": ["BasketballDrill_832x480_50", "BQMall_832x480_60", "PartyScene_832x480_50", "RaceHorses_832x480_30"]
+            "CLASS-C": [
+                "BasketballDrill_832x480_50",
+                "BQMall_832x480_60",
+                "PartyScene_832x480_50",
+                "RaceHorses_832x480_30",
+            ]
         }
         class_d = {
             "CLASS-D": [
@@ -558,7 +559,7 @@ def generate_csv(result_path, seq_list, nb_operation_points):
     elif args.dataset_name == "HIEVE":
         hieve = {
             "HIEVE-1080P": ["hieve-13", "hieve-16"],
-            "HIEVE-720P": ["hieve-17", "hieve-18", "hieve-2"]
+            "HIEVE-720P": ["hieve-17", "hieve-18", "hieve-2"],
         }
         output_df = generate_csv_classwise_video_mota(
             norm_result_path,
@@ -610,7 +611,7 @@ def generate_csv(result_path, seq_list, nb_operation_points):
                 "119",
                 "122",
                 "124",
-            ]
+            ],
         }
 
         output_df = generate_csv_classwise_video_miou(

From 56617901714f5fe9869b031687765b781b99c620 Mon Sep 17 00:00:00 2001
From: Fabien Racape
Date: Fri, 21 Nov 2025 13:41:38 -0800
Subject: [PATCH 3/5] chores: remove unused scripts

---
 scripts/metrics/compute_overall_map.py  |  11 --
 scripts/metrics/compute_overall_miou.py | 131 ------------------------
 scripts/metrics/compute_overall_mot.py  |  98 +-----------------
 scripts/metrics/curve_fitting.py        |   2 -
 scripts/metrics/gen_mpeg_cttc_csv.py    |   6 +-
 5 files changed, 3 insertions(+), 245 deletions(-)

diff --git a/scripts/metrics/compute_overall_map.py b/scripts/metrics/compute_overall_map.py
index 369bb94..6e887e2 100644
--- a/scripts/metrics/compute_overall_map.py
+++ b/scripts/metrics/compute_overall_map.py
@@ -41,8 +41,6 @@
 import json
 import os
 
-from typing import Any, List
-
 import numpy as np
 import pandas as pd
 import utils
@@ -183,15 +181,6 @@ def coco_evaluation(ann_file, detections):
 
     import logging
 
-    class dummyclass:
-        def __init__(self):
-            self._logger = logging.getLogger(__name__)
-
-    # things = [i["name"] for i in coco_eval.cocoGt.cats.values()]
-    # out_all = COCOEvaluator._derive_coco_results(
-    #     dummyclass(), coco_eval, iou_type="bbox", class_names=things
-    # )
-
     headers = ["AP", "AP50", "AP75", "APS", "APM", "APL"]
     npstat = np.array(coco_eval.stats[:6])
     npstat = npstat * 100  # Percent
diff --git a/scripts/metrics/compute_overall_miou.py b/scripts/metrics/compute_overall_miou.py
index a015f4b..8f4eff4 100644
--- a/scripts/metrics/compute_overall_miou.py
+++ b/scripts/metrics/compute_overall_miou.py
@@ -36,63 +36,8 @@
 
 from __future__ import annotations
 
-import argparse
-import csv
 import json
 
-import compute_overall_mot
-import utils
-
-from compressai_vision.evaluators.evaluators import BaseEvaluator
-
-CLASSES = ["PANDAM1", "PANDAM2", "PANDAM2"]
-
-SEQS_BY_CLASS = {
-    CLASSES[0]: [
-        "PANDA057",
-        "PANDA058",
-        "PANDA069",
-        "PANDA070",
-        "PANDA072",
-        "PANDA073",
-        "PANDA077",
-    ],
-    CLASSES[1]: [
-        "PANDA003",
-        "PANDA011",
-        "PANDA016",
-        "PANDA017",
-        "PANDA021",
-        "PANDA023",
-        "PANDA027",
-        "PANDA029",
-        "PANDA030",
-        "PANDA033",
-        "PANDA035",
-        "PANDA037",
-        "PANDA039",
-        "PANDA043",
-        "PANDA053",
-        "PANDA056",
-        "PANDA097",
-    ],
-    CLASSES[2]: [
-        "PANDA088",
-        "PANDA089",
-        "PANDA090",
-        "PANDA095",
-        "PANDA109",
-        "PANDA112",
-        "PANDA113",
-        "PANDA115",
-        "PANDA117",
-        "PANDA119",
-        "PANDA122",
-        "PANDA124",
-    ],
-}
-
-
 def compute_overall_mIoU(class_name, items):
     miou_acc = 0.0
     for item in items:
@@ -103,79 +48,3 @@ def compute_overall_mIoU(class_name, items):
         miou_acc = miou_acc / len(items)
 
     return miou_acc
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument(
-        "-r",
-        "--result_path",
-        required=True,
-        help="For example, '.../logs/runs/[pipeline]/[codec]/[datacatalog]/' ",
-    )
-    parser.add_argument(
-        "-q",
-        "--quality_index",
-        required=False,
-        default=-1,
-        type=int,
-        help="Provide index of quality folders under the `result_path'. quality_index is only meant to point the orderd folders by qp names because there might be different range of qps are used for different sequences",
-    )
-    parser.add_argument(
-        "-a",
-        "--all_qualities",
-        action="store_true",
-        help="run all 6 rate points in MPEG CTCs",
-    )
-    parser.add_argument(
-        "-d",
-        "--dataset_path",
-        required=True,
-        help="For example, '.../vcm_testdata/[dataset]' ",
-    )
-    parser.add_argument(
-        "-c",
-        "--class_to_compute",
-        type=str,
-        choices=CLASSES,
-        required=True,
-    )
-
-    args = parser.parse_args()
-    if args.all_qualities:
-        qualities = range(0, 6)
-    else:
-        qualities = [args.quality_index]
-
-    with open(
-        f"{args.result_path}/{args.class_to_compute}.csv", "w", newline=""
-    ) as file:
-        writer = csv.writer(file)
-        for q in qualities:
-            items = utils.search_items(
-                args.result_path,
-                args.dataset_path,
-                q,
-                SEQS_BY_CLASS[args.class_to_compute],
-                BaseEvaluator.get_jde_eval_info_name,
-            )
-
-            assert (
-                len(items) > 0
-            ), "Nothing relevant information found from given directories..."
-
-            summary, names = compute_overall_mot.compute_overall_mota(
-                args.class_to_compute, items
-            )
-
-            motas = [100.0 * sv[13] for sv in summary.values]
-
-            print(f"{'=' * 10} FINAL OVERALL MOTA SUMMARY {'=' * 10}")
-            print(f"{'-' * 35} : MOTA")
-
-            for key, val in zip(names, motas):
-                print(f"{str(key):35} : {val:.4f}%")
-                if key == "Overall":
-                    writer.writerow([str(q), f"{val:.4f}"])
-            print("\n")
diff --git a/scripts/metrics/compute_overall_mot.py b/scripts/metrics/compute_overall_mot.py
index 5b86b45..c023f2f 100644
--- a/scripts/metrics/compute_overall_mot.py
+++ b/scripts/metrics/compute_overall_mot.py
@@ -36,24 +36,13 @@
 
 from __future__ import annotations
 
-import argparse
-import csv
-
-from typing import Any, Dict, List
+from typing import Dict
 
 import motmetrics as mm
 import torch
 import utils
 
-from compressai_vision.evaluators.evaluators import BaseEvaluator, MOT_JDE_Eval
-
-CLASSES = ["TVD", "HIEVE-1080P", "HIEVE-720P"]
-
-SEQS_BY_CLASS = {
-    CLASSES[0]: ["TVD-01", "TVD-02", "TVD-03"],
-    CLASSES[1]: ["HIEVE-13", "HIEVE-16"],
-    CLASSES[2]: ["HIEVE-2", "HIEVE-17", "HIEVE-18"],
-}
+from compressai_vision.evaluators.evaluators import MOT_JDE_Eval
 
 
 def get_accumulator_res_for_tvd(item: Dict):
@@ -103,87 +92,4 @@ def compute_overall_mota(class_name, items):
         metrics=mm.metrics.motchallenge_metrics,
         generate_overall=True,
     )
-    # rendered_summary = mm.io.render_summary(
-    #     summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names
-    # )
-
-    # print("\n\n")
-    # print(rendered_summary)
-    # print("\n")
-
-    # names.append("Overall")
 
     return summary, names
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument(
-        "-r",
-        "--result_path",
-        required=True,
-        help="For example, '.../logs/runs/[pipeline]/[codec]/[datacatalog]/' ",
-    )
-    parser.add_argument(
-        "-q",
-        "--quality_index",
-        required=False,
-        default=-1,
-        type=int,
-        help="Provide index of quality folders under the `result_path'. quality_index is only meant to point the orderd folders by qp names because there might be different range of qps are used for different sequences",
-    )
-    parser.add_argument(
-        "-a",
-        "--all_qualities",
-        action="store_true",
-        help="run all 6 rate points in MPEG CTCs",
-    )
-    parser.add_argument(
-        "-d",
-        "--dataset_path",
-        required=True,
-        help="For example, '.../vcm_testdata/[dataset]' ",
-    )
-    parser.add_argument(
-        "-c",
-        "--class_to_compute",
-        type=str,
-        choices=CLASSES,
-        required=True,
-    )
-
-    args = parser.parse_args()
-    if args.all_qualities:
-        qualities = range(0, 6)
-    else:
-        qualities = [args.quality_index]
-
-    with open(
-        f"{args.result_path}/{args.class_to_compute}.csv", "w", newline=""
-    ) as file:
-        writer = csv.writer(file)
-        for q in qualities:
-            items = utils.search_items(
-                args.result_path,
-                args.dataset_path,
-                q,
-                SEQS_BY_CLASS[args.class_to_compute],
-                BaseEvaluator.get_jde_eval_info_name,
-            )
-
-            assert (
-                len(items) > 0
-            ), "Nothing relevant information found from given directories..."
-
-            summary, names = compute_overall_mota(args.class_to_compute, items)
-
-            motas = [100.0 * sv[13] for sv in summary.values]
-
-            print(f"{'=' * 10} FINAL OVERALL MOTA SUMMARY {'=' * 10}")
-            print(f"{'-' * 35} : MOTA")
-
-            for key, val in zip(names, motas):
-                print(f"{str(key):35} : {val:.4f}%")
-                if key == "Overall":
-                    writer.writerow([str(q), f"{val:.4f}"])
-            print("\n")
diff --git a/scripts/metrics/curve_fitting.py b/scripts/metrics/curve_fitting.py
index 9fe778d..5781ff3 100644
--- a/scripts/metrics/curve_fitting.py
+++ b/scripts/metrics/curve_fitting.py
@@ -35,8 +35,6 @@
 
 import copy
 import math
-import sys
-
 import numpy as np
 import pandas as pd
 
diff --git a/scripts/metrics/gen_mpeg_cttc_csv.py b/scripts/metrics/gen_mpeg_cttc_csv.py
index 29e40fe..e2543f8 100644
--- a/scripts/metrics/gen_mpeg_cttc_csv.py
+++ b/scripts/metrics/gen_mpeg_cttc_csv.py
@@ -47,10 +47,7 @@
 from compute_overall_map import compute_overall_mAP
 from compute_overall_miou import compute_overall_mIoU
 from compute_overall_mot import compute_overall_mota
-from curve_fitting import (
-    convert_to_monotonic_points_SFU,
-    convert_to_monotonic_points_TVD,
-)
+from curve_fitting import convert_to_monotonic_points_SFU
 
 from compressai_vision.datasets import get_seq_info
 from compressai_vision.evaluators.evaluators import BaseEvaluator
@@ -308,7 +305,6 @@
 
     for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
         class_wise_mious = []
-        # rate_range = [-1] if nb_operation_points == 1 else range(nb_operation_points)
        for q in range(nb_operation_points):
             items = utils.search_items(
                 result_path,

From ca20621dac387daccf17ab9691556d2dcf07e41c Mon Sep 17 00:00:00 2001
From: Fabien Racape
Date: Fri, 21 Nov 2025 14:35:36 -0800
Subject: [PATCH 4/5] refactor: metrics

---
 ...verall_map.py => compute_per_class_map.py} | 106 +-----------------
 ...rall_miou.py => compute_per_class_miou.py} |   2 +-
 ...erall_mot.py => compute_per_class_mota.py} |  10 +-
 scripts/metrics/gen_mpeg_cttc_csv.py          |  63 +++++------
 scripts/metrics/utils.py                      |   3 -
 5 files changed, 43 insertions(+), 141 deletions(-)
 rename scripts/metrics/{compute_overall_map.py => compute_per_class_map.py} (63%)
 rename scripts/metrics/{compute_overall_miou.py => compute_per_class_miou.py} (97%)
 rename scripts/metrics/{compute_overall_mot.py => compute_per_class_mota.py} (92%)

diff --git a/scripts/metrics/compute_overall_map.py b/scripts/metrics/compute_per_class_map.py
similarity index 63%
rename from scripts/metrics/compute_overall_map.py
rename to scripts/metrics/compute_per_class_map.py
index 6e887e2..ac57a9c 100644
--- a/scripts/metrics/compute_overall_map.py
+++ b/scripts/metrics/compute_per_class_map.py
@@ -36,8 +36,6 @@
 
 from __future__ import annotations
 
-import argparse
-import csv
 import json
 import os
 
@@ -45,38 +43,12 @@
 import numpy as np
 import pandas as pd
 import utils
-
 # from detectron2.evaluation import COCOEvaluator
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval
 
-from compressai_vision.evaluators.evaluators import BaseEvaluator
-
 CLASSES = ["CLASS-AB", "CLASS-C", "CLASS-D", "CLASS-AB*"]
 
-SEQS_BY_CLASS = {
-    CLASSES[0]: [
-        "Traffic_2560x1600_30",
-        "Kimono_1920x1080_24",
-        "ParkScene_1920x1080_24",
-        "Cactus_1920x1080_50",
-        "BasketballDrive_1920x1080_50",
-        "BQTerrace_1920x1080_60",
-    ],
-    CLASSES[1]: [
-        "BasketballDrill_832x480_50",
-        "BQMall_832x480_60",
-        "PartyScene_832x480_50",
-        "RaceHorses_832x480_30",
-    ],
-    CLASSES[2]: [
-        "BasketballPass_416x240_50",
-        "BQSquare_416x240_60",
"BlowingBubbles_416x240_50", - "RaceHorses_416x240_30", - ], - CLASSES[3]: ["ns_Traffic_2560x1600_30", "ns_BQTerrace_1920x1080_60"], -} SEQUENCE_TO_OFFSET = { "Traffic_2560x1600_30": 10000, @@ -101,7 +73,7 @@ NS_SEQ_PREFIX = "ns_" # Prefix of non-scaled sequences -def compute_overall_mAP(seq_root_names, items): +def compute_per_class_mAP(seq_root_names, items): classwise_instances_results = [] classwise_anchor_images = [] classwise_annotation = [] @@ -158,10 +130,6 @@ def compute_overall_mAP(seq_root_names, items): os.remove(TMP_EVAL_FILE) os.remove(TMP_ANCH_FILE) - # print("\n") - # print(summary) - # print("\n") - return summary @@ -179,80 +147,10 @@ def coco_evaluation(ann_file, detections): coco_eval.accumulate() coco_eval.summarize() - import logging headers = ["AP", "AP50", "AP75", "APS", "APM", "APL"] npstat = np.array(coco_eval.stats[:6]) npstat = npstat * 100 # Percent - # npstat = np.around(npstat, 2) data_frame = pd.DataFrame([npstat], columns=headers) - return data_frame - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "-r", - "--result_path", - required=True, - help="For example, '.../logs/runs/[pipeline]/[codec]/[datacatalog]/' ", - ) - parser.add_argument( - "-q", - "--quality_index", - required=False, - default=-1, - type=int, - help="Provide index of quality folders under the `result_path'. quality_index is only meant to point the orderd folders by qp names because there might be different range of qps are used for different sequences", - ) - parser.add_argument( - "-a", - "--all_qualities", - action="store_true", - help="run all 6 rate points in MPEG CTCs", - ) - parser.add_argument( - "-d", - "--dataset_path", - required=True, - help="For example, '.../vcm_testdata/[dataset]' ", - ) - parser.add_argument( - "-c", - "--class_to_compute", - type=str, - choices=CLASSES, - required=True, - ) - - args = parser.parse_args() - if args.all_qualities: - qualities = range(0, 6) - else: - qualities = [args.quality_index] - - with open( - f"{args.result_path}/{args.class_to_compute}.csv", "w", newline="" - ) as file: - writer = csv.writer(file) - for q in qualities: - items = utils.search_items( - args.result_path, - args.dataset_path, - q, - SEQS_BY_CLASS[args.class_to_compute], - BaseEvaluator.get_coco_eval_info_name, - ) - - assert ( - len(items) > 0 - ), "Nothing relevant information found from given directories..." 
-
-            summary = compute_overall_mAP(SEQS_BY_CLASS[args.class_to_compute], items)
-
-            writer.writerow([f"{q}", f"{summary['AP'][0]:.4f}"])
-            print(f"{'=' * 10} FINAL OVERALL mAP SUMMARY {'=' * 10}")
-            print(f"{'-' * 32} AP : {summary['AP'][0]:.4f}")
-            print("\n\n")
+    return data_frame
\ No newline at end of file
diff --git a/scripts/metrics/compute_overall_miou.py b/scripts/metrics/compute_per_class_miou.py
similarity index 97%
rename from scripts/metrics/compute_overall_miou.py
rename to scripts/metrics/compute_per_class_miou.py
index 8f4eff4..3b8a538 100644
--- a/scripts/metrics/compute_overall_miou.py
+++ b/scripts/metrics/compute_per_class_miou.py
@@ -38,7 +38,7 @@
 
 import json
 
-def compute_overall_mIoU(class_name, items):
+def compute_per_class_mIoU(items):
     miou_acc = 0.0
     for item in items:
         with open(item["eval_info"], "r") as f:
diff --git a/scripts/metrics/compute_overall_mot.py b/scripts/metrics/compute_per_class_mota.py
similarity index 92%
rename from scripts/metrics/compute_overall_mot.py
rename to scripts/metrics/compute_per_class_mota.py
index c023f2f..974b694 100644
--- a/scripts/metrics/compute_overall_mot.py
+++ b/scripts/metrics/compute_per_class_mota.py
@@ -44,6 +44,14 @@
 
 from compressai_vision.evaluators.evaluators import MOT_JDE_Eval
 
+CLASSES = ["TVD", "HIEVE-1080P", "HIEVE-720P"]
+
+SEQS_BY_CLASS = {
+    CLASSES[0]: ["TVD-01", "TVD-02", "TVD-03"],
+    CLASSES[1]: ["HIEVE-13", "HIEVE-16"],
+    CLASSES[2]: ["HIEVE-2", "HIEVE-17", "HIEVE-18"],
+}
+
 
 def get_accumulator_res_for_tvd(item: Dict):
     _gt_pd = MOT_JDE_Eval._load_gt_in_motchallenge(item[utils.GT_INFO_KEY])
@@ -67,7 +75,7 @@
     return acc, None, item[utils.SEQ_NAME_KEY]
 
 
-def compute_overall_mota(class_name, items):
+def compute_per_class_mota(class_name, items):
     get_accumulator_res = {
         CLASSES[0]: get_accumulator_res_for_tvd,
         CLASSES[1]: get_accumulator_res_for_hieve,
diff --git a/scripts/metrics/gen_mpeg_cttc_csv.py b/scripts/metrics/gen_mpeg_cttc_csv.py
index e2543f8..0e33c63 100644
--- a/scripts/metrics/gen_mpeg_cttc_csv.py
+++ b/scripts/metrics/gen_mpeg_cttc_csv.py
@@ -44,9 +44,9 @@
 import pandas as pd
 import utils
 
-from compute_overall_map import compute_overall_mAP
-from compute_overall_miou import compute_overall_mIoU
-from compute_overall_mot import compute_overall_mota
+from compute_per_class_mAP import compute_per_class_mAP
+from compute_per_class_mIoU import compute_per_class_mIoU
+from compute_per_class_mota import compute_per_class_mota
 from curve_fitting import convert_to_monotonic_points_SFU
 
 from compressai_vision.datasets import get_seq_info
@@ -102,18 +102,18 @@
     return out
 
 
-def generate_classwise_df(result_df, classes: dict):
-    classwise = pd.DataFrame(columns=result_df.columns)
-    classwise.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
+def generate_class_df(result_df, classes: dict):
+    class_data = pd.DataFrame(columns=result_df.columns)
+    class_data.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
     for tag, item in classes.items():
-        output = compute_class_wise_results(result_df, tag, item)
-        classwise_df = df_append(classwise, output)
+        output = compute_per_class_results(result_df, tag, item)
+        classwise_df = df_append(class_data, output)
 
     return classwise_df
 
 
-def compute_class_wise_results(result_df, name, sequences):
+def compute_per_class_results(result_df, name, sequences):
     samples = None
     num_points = prev_num_points = -1
     output = pd.DataFrame(columns=result_df.columns)
@@ -168,7 +168,7 @@ def compute_class_wise_results(result_df, name, sequences):
 def generate_csv_classwise_video_map(
     result_path,
     dataset_path,
-    dict_of_classwise_seq,
+    dict_of_class_seq,
     metric="AP",
     gt_folder="annotations",
     nb_operation_points: int = 4,
@@ -177,7 +177,7 @@ def generate_csv_classwise_video_map(
     dataset_prefix: str = None,
 ):
     seq_list = []
-    [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()]
+    [seq_list.extend(sequences) for sequences in dict_of_class_seq.values()]
 
     opts_metrics = {"AP": 0, "AP50": 1, "AP75": 2, "APS": 3, "APM": 4, "APL": 5}
     results_df = read_df_rec(result_path, dataset_prefix, seq_list, nb_operation_points)
@@ -192,14 +192,14 @@ def generate_csv_classwise_video_map(
     ## drop columns
     output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
-    for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
+    for class_name, class_seqs in dict_of_class_seq.items():
         class_wise_maps = []
         for q in range(nb_operation_points):
             items = utils.search_items(
                 result_path,
                 dataset_path,
                 q,
-                classwise_seqs,
+                class_seqs,
                 BaseEvaluator.get_coco_eval_info_name,
                 by_name=True,
                 gt_folder=gt_folder,
@@ -212,15 +212,15 @@ def generate_csv_classwise_video_map(
             ), "No evaluation information found in provided result directories..."
 
             if not skip_classwise:
-                summary = compute_overall_mAP(
-                    dict_of_classwise_seq[classwise_name], items
+                summary = compute_per_class_mAP(
+                    dict_of_class_seq[class_name], items
                 )
                 maps = summary.values[0][opts_metrics[metric]]
                 class_wise_maps.append(maps)
 
         if not skip_classwise and nb_operation_points > 0:
-            class_wise_results_df = generate_classwise_df(
-                results_df, {classwise_name: classwise_seqs}
+            class_wise_results_df = generate_class_df(
+                results_df, {class_name: class_seqs}
             )
             class_wise_results_df["end_accuracy"] = class_wise_maps
 
@@ -232,12 +232,12 @@ def generate_csv_classwise_video_mota(
     result_path,
     dataset_path,
-    dict_of_classwise_seq,
+    dict_of_class_seq,
     nb_operation_points: int = 4,
     dataset_prefix: str = None,
 ):
     seq_list = []
-    [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()]
+    [seq_list.extend(sequences) for sequences in dict_of_class_seq.values()]
 
     results_df = read_df_rec(result_path, dataset_prefix, seq_list, nb_operation_points)
     results_df = results_df.sort_values(by=["Dataset", "qp"], ascending=[True, True])
@@ -249,14 +249,14 @@ def generate_csv_classwise_video_mota(
     ## drop columns
     output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
 
-    for classwise_name, classwise_seqs in dict_of_classwise_seq.items():
+    for class_name, class_seqs in dict_of_class_seq.items():
         class_wise_motas = []
         for q in range(nb_operation_points):
             items = utils.search_items(
                 result_path,
                 dataset_path,
                 q,
-                classwise_seqs,
+                class_seqs,
                 BaseEvaluator.get_jde_eval_info_name,
             )
 
             assert (
                 len(items) > 0
             ), "Nothing relevant information found from given directories..."
- summary, _ = compute_overall_mota(classwise_name, items) + summary, _ = compute_per_class_mota(class_name, items) mota = summary.values[-1][13] * 100.0 class_wise_motas.append(mota) if nb_operation_points > 0: - class_wise_results_df = generate_classwise_df( - results_df, {classwise_name: classwise_seqs} + class_wise_results_df = generate_class_df( + results_df, {class_name: class_seqs} ) class_wise_results_df["end_accuracy"] = class_wise_motas @@ -284,12 +284,11 @@ def generate_csv_classwise_video_mota( def generate_csv_classwise_video_miou( result_path, dataset_path, - dict_of_classwise_seq, + dict_of_class_seq, nb_operation_points: int = 4, - dataset_prefix: str = None, ): seq_list = [] - [seq_list.extend(sequences) for sequences in dict_of_classwise_seq.values()] + [seq_list.extend(sequences) for sequences in dict_of_class_seq.values()] results_df = read_df_rec(result_path, "", seq_list, nb_operation_points) @@ -303,14 +302,14 @@ def generate_csv_classwise_video_miou( ## drop columns output_df.drop(columns=["fps", "num_of_coded_frame"], inplace=True) - for classwise_name, classwise_seqs in dict_of_classwise_seq.items(): + for class_name, class_seqs in dict_of_class_seq.items(): class_wise_mious = [] for q in range(nb_operation_points): items = utils.search_items( result_path, dataset_path, q, - classwise_seqs, + class_seqs, BaseEvaluator.get_miou_eval_info_name, by_name=True, pandaset_flag=True, @@ -320,7 +319,7 @@ def generate_csv_classwise_video_miou( len(items) > 0 ), "Nothing relevant information found from given directories..." - miou = compute_overall_mIoU(classwise_name, items) + miou = compute_per_class_mIoU(class_name, items) class_wise_mious.append(miou) matched_seq_names = [] @@ -328,8 +327,8 @@ def generate_csv_classwise_video_miou( name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY]) matched_seq_names.append(name) - class_wise_results_df = generate_classwise_df( - results_df, {classwise_name: classwise_seqs} + class_wise_results_df = generate_class_df( + results_df, {class_name: class_seqs} ) class_wise_results_df["end_accuracy"] = class_wise_mious diff --git a/scripts/metrics/utils.py b/scripts/metrics/utils.py index 8cc83ea..9b68180 100644 --- a/scripts/metrics/utils.py +++ b/scripts/metrics/utils.py @@ -40,12 +40,9 @@ import re from pathlib import Path -from typing import Dict, Optional __all__ = [ "get_seq_number", - # "get_eval_info_path", - # "get_seq_info_path", ] SEQ_NAME_KEY = "seq_name" From 4f484d2a0cdb005933b457592cf61937507d7514 Mon Sep 17 00:00:00 2001 From: Fabien Racape Date: Fri, 21 Nov 2025 14:54:34 -0800 Subject: [PATCH 5/5] refctor: clarify the generation of metrics per class --- scripts/metrics/compute_per_class_map.py | 7 +-- scripts/metrics/compute_per_class_miou.py | 1 + scripts/metrics/curve_fitting.py | 1 + scripts/metrics/gen_mpeg_cttc_csv.py | 52 +++++++++-------------- 4 files changed, 24 insertions(+), 37 deletions(-) diff --git a/scripts/metrics/compute_per_class_map.py b/scripts/metrics/compute_per_class_map.py index ac57a9c..1db0ee8 100644 --- a/scripts/metrics/compute_per_class_map.py +++ b/scripts/metrics/compute_per_class_map.py @@ -43,13 +43,11 @@ import pandas as pd import utils + # from detectron2.evaluation import COCOEvaluator from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval -CLASSES = ["CLASS-AB", "CLASS-C", "CLASS-D", "CLASS-AB*"] - - SEQUENCE_TO_OFFSET = { "Traffic_2560x1600_30": 10000, "Kimono_1920x1080_24": 20000, @@ -147,10 +145,9 @@ def coco_evaluation(ann_file, detections): 
     coco_eval.accumulate()
     coco_eval.summarize()
 
-
     headers = ["AP", "AP50", "AP75", "APS", "APM", "APL"]
     npstat = np.array(coco_eval.stats[:6])
     npstat = npstat * 100  # Percent
 
     data_frame = pd.DataFrame([npstat], columns=headers)
-    return data_frame
\ No newline at end of file
+    return data_frame
diff --git a/scripts/metrics/compute_per_class_miou.py b/scripts/metrics/compute_per_class_miou.py
index 3b8a538..cd951f3 100644
--- a/scripts/metrics/compute_per_class_miou.py
+++ b/scripts/metrics/compute_per_class_miou.py
@@ -38,6 +38,7 @@
 
 import json
 
+
 def compute_per_class_mIoU(items):
     miou_acc = 0.0
     for item in items:
diff --git a/scripts/metrics/curve_fitting.py b/scripts/metrics/curve_fitting.py
index 5781ff3..07327c6 100644
--- a/scripts/metrics/curve_fitting.py
+++ b/scripts/metrics/curve_fitting.py
@@ -35,6 +35,7 @@
 
 import copy
 import math
+
 import numpy as np
 import pandas as pd
 
diff --git a/scripts/metrics/gen_mpeg_cttc_csv.py b/scripts/metrics/gen_mpeg_cttc_csv.py
index 0e33c63..15c47f5 100644
--- a/scripts/metrics/gen_mpeg_cttc_csv.py
+++ b/scripts/metrics/gen_mpeg_cttc_csv.py
@@ -44,8 +44,8 @@
 import pandas as pd
 import utils
 
-from compute_per_class_mAP import compute_per_class_mAP
-from compute_per_class_mIoU import compute_per_class_mIoU
+from compute_per_class_map import compute_per_class_mAP
+from compute_per_class_miou import compute_per_class_mIoU
 from compute_per_class_mota import compute_per_class_mota
 from curve_fitting import convert_to_monotonic_points_SFU
 
@@ -103,44 +103,37 @@
 
 def generate_class_df(result_df, classes: dict):
-    class_data = pd.DataFrame(columns=result_df.columns)
-    class_data.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
-
-    for tag, item in classes.items():
-        output = compute_per_class_results(result_df, tag, item)
-        classwise_df = df_append(class_data, output)
+    assert (
+        len(classes) == 1
+    ), "generate_class_df is expected to be called with a single class entry"
 
-    return classwise_df
+    ((tag, sequences),) = classes.items()
+    return compute_per_class_results(result_df, tag, sequences)
 
 
 def compute_per_class_results(result_df, name, sequences):
-    samples = None
-    num_points = prev_num_points = -1
-    output = pd.DataFrame(columns=result_df.columns)
-    output.drop(columns=["fps", "num_of_coded_frame"], inplace=True)
+    per_sequence_frames = []
+    num_points = None
 
     for seq in sequences:
-        d = result_df.loc[(result_df["Dataset"] == seq)]
+        seq_frames = result_df.loc[result_df["Dataset"] == seq]
 
-        if samples is None:
-            samples = d
+        if num_points is None:
+            num_points = seq_frames.shape[0]
         else:
-            samples = df_append(samples, d)
+            assert num_points == seq_frames.shape[0]
 
-        if prev_num_points == -1:
-            num_points = prev_num_points = d.shape[0]
-        else:
-            assert prev_num_points == d.shape[0]
+        per_sequence_frames.append(seq_frames)
 
+    samples = pd.concat(per_sequence_frames, ignore_index=True)
     samples["length"] = samples["num_of_coded_frame"] / samples["fps"]
 
+    output = result_df.drop(columns=["fps", "num_of_coded_frame"]).head(0).copy()
+
     for i in range(num_points):
-        # print(f"Set - {i}")
         points = samples.iloc[range(i, samples.shape[0], num_points)]
         total_length = points["length"].sum()
-        # print(points)
-
         new_row = {
             output.columns[0]: [
                 name,
@@ -176,8 +169,7 @@ def generate_csv_classwise_video_map(
     seq_prefix: str = None,
     dataset_prefix: str = None,
 ):
-    seq_list = []
-    [seq_list.extend(sequences) for sequences in dict_of_class_seq.values()]
+    seq_list = [seq for sequences in dict_of_class_seq.values() for seq in sequences]
opts_metrics = {"AP": 0, "AP50": 1, "AP75": 2, "APS": 3, "APM": 4, "APL": 5} results_df = read_df_rec(result_path, dataset_prefix, seq_list, nb_operation_points) @@ -212,9 +204,7 @@ def generate_csv_classwise_video_map( ), "No evaluation information found in provided result directories..." if not skip_classwise: - summary = compute_per_class_mAP( - dict_of_class_seq[class_name], items - ) + summary = compute_per_class_mAP(dict_of_class_seq[class_name], items) maps = summary.values[0][opts_metrics[metric]] class_wise_maps.append(maps) @@ -327,9 +317,7 @@ def generate_csv_classwise_video_miou( name, _, _ = get_seq_info(seq_info[utils.SEQ_INFO_KEY]) matched_seq_names.append(name) - class_wise_results_df = generate_class_df( - results_df, {class_name: class_seqs} - ) + class_wise_results_df = generate_class_df(results_df, {class_name: class_seqs}) class_wise_results_df["end_accuracy"] = class_wise_mious