From d59dcda2d15d82b7c654f72ad81598259adebad7 Mon Sep 17 00:00:00 2001 From: zhangliang <zhang_liang_work@163.com> Date: Sun, 26 Feb 2023 13:22:14 +0800 Subject: [PATCH] fix issue of metric, update configs --- .gitignore | 3 +++ configs/barlow_c10.yaml | 2 +- configs/barlow_c100.yaml | 4 ++-- configs/barlow_tinyimagenet.yaml | 4 ++-- configs/simsiam_c10.yaml | 2 +- configs/simsiam_c100.yaml | 6 +++--- configs/simsiam_tinyimagenet.yaml | 2 +- linear_eval_alltasks.py | 14 +++++++------- main.py | 15 ++++++++------- 9 files changed, 28 insertions(+), 24 deletions(-) diff --git a/.gitignore b/.gitignore index 0171ad3..4f86f54 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,6 @@ __pycache__/ checkpoints/ data/ +.history/ +.lh/ +.vscode/ \ No newline at end of file diff --git a/configs/barlow_c10.yaml b/configs/barlow_c10.yaml index 7880a86..26fd5f5 100644 --- a/configs/barlow_c10.yaml +++ b/configs/barlow_c10.yaml @@ -28,7 +28,7 @@ train: knn_k: 200 alpha: 0.4 eval: # linear evaluation, False will turn off automatic evaluation after training - type: "accum" + type: "all" optimizer: name: sgd weight_decay: 0 diff --git a/configs/barlow_c100.yaml b/configs/barlow_c100.yaml index 3625b0b..d57d5f3 100644 --- a/configs/barlow_c100.yaml +++ b/configs/barlow_c100.yaml @@ -1,4 +1,4 @@ -name: simsiam-c100-experiment-resnet18 +name: barlow-c100-experiment-resnet18 dataset: name: seq-cifar100 image_size: 32 @@ -28,7 +28,7 @@ train: knn_k: 200 alpha: 0.4 eval: # linear evaluation, False will turn off automatic evaluation after training - type: "accum" + type: "all" optimizer: name: sgd weight_decay: 0 diff --git a/configs/barlow_tinyimagenet.yaml b/configs/barlow_tinyimagenet.yaml index 672b9e2..e31a792 100644 --- a/configs/barlow_tinyimagenet.yaml +++ b/configs/barlow_tinyimagenet.yaml @@ -1,4 +1,4 @@ -name: simsiam-tinyimg-experiment-resnet18 +name: barlow-tinyimg-experiment-resnet18 dataset: name: seq-tinyimg image_size: 64 @@ -28,7 +28,7 @@ train: knn_k: 200 alpha: 0.4 
eval: # linear evaluation, False will turn off automatic evaluation after training - type: "accum" + type: "all" optimizer: name: sgd weight_decay: 0 diff --git a/configs/simsiam_c10.yaml b/configs/simsiam_c10.yaml index 8129f84..544e6b2 100644 --- a/configs/simsiam_c10.yaml +++ b/configs/simsiam_c10.yaml @@ -28,7 +28,7 @@ train: knn_k: 200 alpha: 0.4 eval: # linear evaluation, False will turn off automatic evaluation after training - type: "accum" + type: "all" optimizer: name: sgd weight_decay: 0 diff --git a/configs/simsiam_c100.yaml b/configs/simsiam_c100.yaml index 8129f84..6670e6f 100644 --- a/configs/simsiam_c100.yaml +++ b/configs/simsiam_c100.yaml @@ -1,6 +1,6 @@ -name: simsiam-c10-experiment-resnet18 +name: simsiam-c100-experiment-resnet18 dataset: - name: seq-cifar10 + name: seq-cifar100 image_size: 32 num_workers: 4 @@ -28,7 +28,7 @@ train: knn_k: 200 alpha: 0.4 eval: # linear evaluation, False will turn off automatic evaluation after training - type: "accum" + type: "all" optimizer: name: sgd weight_decay: 0 diff --git a/configs/simsiam_tinyimagenet.yaml b/configs/simsiam_tinyimagenet.yaml index e1de471..a51b099 100644 --- a/configs/simsiam_tinyimagenet.yaml +++ b/configs/simsiam_tinyimagenet.yaml @@ -28,7 +28,7 @@ train: knn_k: 200 alpha: 0.4 eval: # linear evaluation, False will turn off automatic evaluation after training - type: "accum" + type: "all" optimizer: name: sgd weight_decay: 0 diff --git a/linear_eval_alltasks.py b/linear_eval_alltasks.py index 18df6a9..59d687f 100644 --- a/linear_eval_alltasks.py +++ b/linear_eval_alltasks.py @@ -11,7 +11,7 @@ from datasets import get_dataset from models.optimizers import get_optimizer, LR_Scheduler from utils.loggers import * - +from itertools import zip_longest def evaluate_single(model, dataset, test_loader, memory_loader, device, k, last=False) -> Tuple[list, list, list, list]: accs, accs_mask_classes = [], [] @@ -41,6 +41,7 @@ def main(device, args): test_loaders.append(te) for t in tqdm(range(0, 
dataset_copy.N_TASKS), desc='Evaluatinng'): + # NOTE: set eval_type to 'all' to evaluate on all tasks and get the average forgetting if args.eval.type == 'all': eval_tids = [j for j in range(dataset.N_TASKS)] elif args.eval.type == 'curr': @@ -62,15 +63,14 @@ def main(device, args): knn_acc_list.append(acc) kfgt = [] - results['knn-cls-each-acc'].append(knn_acc_list[-1]) - results['knn-cls-max-acc'].append(knn_acc_list[-1]) + results['knn-cls-each-acc'].append(knn_acc_list) # memorize max accuracy + results['knn-cls-max-acc'] = [max(item) for item in zip_longest(*results['knn-cls-each-acc'], fillvalue=0)][:t] for j in range(t): - if knn_acc_list[j] > results['knn-cls-max-acc'][j]: - results['knn-cls-max-acc'][j] = knn_acc_list[j] - kfgt.append(results['knn-cls-each-acc'][j] - knn_acc_list[j]) + kfgt.append(results['knn-cls-max-acc'][j] - knn_acc_list[j]) results['knn-cls-acc'].append(np.mean(knn_acc_list)) - results['knn-cls-fgt'].append(np.mean(kfgt)) + if len(kfgt) > 0: + results['knn-cls-fgt'].append(np.mean(kfgt)) print(results) with open(os.path.join(f'{args.log_dir}', f"%s_accuracy_logs.txt"%args.name), 'w+') as f: diff --git a/main.py b/main.py index 5a4e0a8..744ce15 100644 --- a/main.py +++ b/main.py @@ -17,6 +17,7 @@ from datasets.utils.continual_dataset import ContinualDataset from models.utils.continual_model import ContinualModel from typing import Tuple +from itertools import zip_longest def evaluate(model: ContinualModel, dataset: ContinualDataset, device, classifier=None) -> Tuple[list, list]: @@ -79,7 +80,8 @@ def main(device, args): for t in range(dataset.N_TASKS): # train_loader, memory_loader, test_loader = dataset.get_data_loaders(args) - if args.eval.type == 'all': + # NOTE: set eval_type to 'all' to evaluate on all tasks and get the average forgetting + if args.eval.type == 'all': eval_tids = [j for j in range(dataset.N_TASKS)] elif args.eval.type == 'curr': eval_tids = [t] @@ -111,15 +113,14 @@ def main(device, args): kfgt = [] # memorize 
current task acc - results['knn-cls-each-acc'].append(knn_acc_list[-1]) - results['knn-cls-max-acc'].append(knn_acc_list[-1]) + results['knn-cls-each-acc'].append(knn_acc_list) # memorize max accuracy + results['knn-cls-max-acc'] = [max(item) for item in zip_longest(*results['knn-cls-each-acc'], fillvalue=0)][:t] for j in range(t): - if knn_acc_list[j] > results['knn-cls-max-acc'][j]: - results['knn-cls-max-acc'][j] = knn_acc_list[j] - kfgt.append(results['knn-cls-each-acc'][j] - knn_acc_list[j]) + kfgt.append(results['knn-cls-max-acc'][j] - knn_acc_list[j]) results['knn-cls-acc'].append(np.mean(knn_acc_list)) - results['knn-cls-fgt'].append(np.mean(kfgt)) + if len(kfgt) > 0: + results['knn-cls-fgt'].append(np.mean(kfgt)) model_path = os.path.join(args.ckpt_dir, f"{args.model.cl_model}_{args.name}_{t}.pth") torch.save({