3 changes: 3 additions & 0 deletions .gitignore
@@ -2,3 +2,6 @@
 __pycache__/
 checkpoints/
 data/
+.history/
+.lh/
+.vscode/
2 changes: 1 addition & 1 deletion configs/barlow_c10.yaml
@@ -28,7 +28,7 @@ train:
 knn_k: 200
 alpha: 0.4
 eval: # linear evaluation, False will turn off automatic evaluation after training
-type: "accum"
+type: "all"
 optimizer:
 name: sgd
 weight_decay: 0
4 changes: 2 additions & 2 deletions configs/barlow_c100.yaml
@@ -1,4 +1,4 @@
-name: simsiam-c100-experiment-resnet18
+name: barlow-c100-experiment-resnet18
 dataset:
 name: seq-cifar100
 image_size: 32
@@ -28,7 +28,7 @@ train:
 knn_k: 200
 alpha: 0.4
 eval: # linear evaluation, False will turn off automatic evaluation after training
-type: "accum"
+type: "all"
 optimizer:
 name: sgd
 weight_decay: 0
4 changes: 2 additions & 2 deletions configs/barlow_tinyimagenet.yaml
@@ -1,4 +1,4 @@
-name: simsiam-tinyimg-experiment-resnet18
+name: barlow-tinyimg-experiment-resnet18
 dataset:
 name: seq-tinyimg
 image_size: 64
@@ -28,7 +28,7 @@ train:
 knn_k: 200
 alpha: 0.4
 eval: # linear evaluation, False will turn off automatic evaluation after training
-type: "accum"
+type: "all"
 optimizer:
 name: sgd
 weight_decay: 0
2 changes: 1 addition & 1 deletion configs/simsiam_c10.yaml
@@ -28,7 +28,7 @@ train:
 knn_k: 200
 alpha: 0.4
 eval: # linear evaluation, False will turn off automatic evaluation after training
-type: "accum"
+type: "all"
 optimizer:
 name: sgd
 weight_decay: 0
6 changes: 3 additions & 3 deletions configs/simsiam_c100.yaml
@@ -1,6 +1,6 @@
-name: simsiam-c10-experiment-resnet18
+name: simsiam-c100-experiment-resnet18
 dataset:
-name: seq-cifar10
+name: seq-cifar100
 image_size: 32
 num_workers: 4

@@ -28,7 +28,7 @@ train:
 knn_k: 200
 alpha: 0.4
 eval: # linear evaluation, False will turn off automatic evaluation after training
-type: "accum"
+type: "all"
 optimizer:
 name: sgd
 weight_decay: 0
2 changes: 1 addition & 1 deletion configs/simsiam_tinyimagenet.yaml
@@ -28,7 +28,7 @@ train:
 knn_k: 200
 alpha: 0.4
 eval: # linear evaluation, False will turn off automatic evaluation after training
-type: "accum"
+type: "all"
 optimizer:
 name: sgd
 weight_decay: 0
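All six configs flip eval.type from "accum" to "all". As a rough illustration of what that switch controls, here is a minimal sketch of the task-selection logic. The 'all' and 'curr' branches mirror the ones visible in main.py below; the 'accum' branch is not shown in this diff, so its behaviour here is an assumption.

# Sketch of how args.eval.type could select which task IDs get evaluated
# after finishing training on task t (the 'accum' semantics is assumed).
def select_eval_task_ids(eval_type: str, t: int, n_tasks: int) -> list:
    if eval_type == 'all':
        return list(range(n_tasks))    # every task, seen or not
    elif eval_type == 'curr':
        return [t]                     # only the current task
    elif eval_type == 'accum':         # assumed: tasks seen so far
        return list(range(t + 1))
    raise ValueError(f'unknown eval type: {eval_type}')

# Example: with 5 tasks, after task 2, 'all' -> [0, 1, 2, 3, 4], 'curr' -> [2]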
14 changes: 7 additions & 7 deletions linear_eval_alltasks.py
@@ -11,7 +11,7 @@
 from datasets import get_dataset
 from models.optimizers import get_optimizer, LR_Scheduler
 from utils.loggers import *
-
+from itertools import zip_longest

 def evaluate_single(model, dataset, test_loader, memory_loader, device, k, last=False) -> Tuple[list, list, list, list]:
 accs, accs_mask_classes = [], []
@@ -41,6 +41,7 @@ def main(device, args):
 test_loaders.append(te)

 for t in tqdm(range(0, dataset_copy.N_TASKS), desc='Evaluatinng'):
+# NOTE: set eval_type to 'all' to evaluate on all tasks and get the average forgetting
 if args.eval.type == 'all':
 eval_tids = [j for j in range(dataset.N_TASKS)]
 elif args.eval.type == 'curr':
@@ -62,15 +63,14 @@
 knn_acc_list.append(acc)

 kfgt = []
-results['knn-cls-each-acc'].append(knn_acc_list[-1])
-results['knn-cls-max-acc'].append(knn_acc_list[-1])
+results['knn-cls-each-acc'].append(knn_acc_list)
 # memorize max accuracy
+results['knn-cls-max-acc'] = [max(item) for item in zip_longest(*results['knn-cls-each-acc'], fillvalue=0)][:t]
 for j in range(t):
-if knn_acc_list[j] > results['knn-cls-max-acc'][j]:
-results['knn-cls-max-acc'][j] = knn_acc_list[j]
-kfgt.append(results['knn-cls-each-acc'][j] - knn_acc_list[j])
+kfgt.append(results['knn-cls-max-acc'][j] - knn_acc_list[j])
 results['knn-cls-acc'].append(np.mean(knn_acc_list))
-results['knn-cls-fgt'].append(np.mean(kfgt))
+if len(kfgt) > 0:
+results['knn-cls-fgt'].append(np.mean(kfgt))

 print(results)
 with open(os.path.join(f'{args.log_dir}', f"%s_accuracy_logs.txt"%args.name), 'w+') as f:
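To make the new bookkeeping concrete, here is a small self-contained sketch with toy numbers (not from this repository) of how storing the full per-task accuracy list and taking a column-wise max with zip_longest yields the running best accuracy per task, from which forgetting is the drop relative to that best.

# Toy illustration of the updated metric bookkeeping (hypothetical numbers).
# After each training task t, we keep the whole accuracy list, take a
# per-task running maximum, and measure forgetting on previously seen tasks
# as (best so far - current).
import numpy as np
from itertools import zip_longest

each_acc = []            # plays the role of results['knn-cls-each-acc']
eval_rounds = [          # accuracy on tasks 0..2 after training task 0, 1, 2
    [90.0, 10.0, 12.0],  # after task 0 (tasks 1-2 not trained yet)
    [85.0, 88.0, 11.0],  # after task 1
    [80.0, 84.0, 87.0],  # after task 2
]

for t, knn_acc_list in enumerate(eval_rounds):
    each_acc.append(knn_acc_list)
    # column-wise max over all rounds so far, truncated to tasks before t
    max_acc = [max(col) for col in zip_longest(*each_acc, fillvalue=0)][:t]
    kfgt = [max_acc[j] - knn_acc_list[j] for j in range(t)]
    if len(kfgt) > 0:
        print(f"after task {t}: avg acc {np.mean(knn_acc_list):.1f}, "
              f"avg forgetting {np.mean(kfgt):.1f}")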
15 changes: 8 additions & 7 deletions main.py
@@ -17,6 +17,7 @@
 from datasets.utils.continual_dataset import ContinualDataset
 from models.utils.continual_model import ContinualModel
 from typing import Tuple
+from itertools import zip_longest


 def evaluate(model: ContinualModel, dataset: ContinualDataset, device, classifier=None) -> Tuple[list, list]:
@@ -79,7 +80,8 @@ def main(device, args):

 for t in range(dataset.N_TASKS):
 # train_loader, memory_loader, test_loader = dataset.get_data_loaders(args)
-if args.eval.type == 'all':
+# NOTE: set eval_type to 'all' to evaluate on all tasks and get the average forgetting
+if args.eval.type == 'all':
 eval_tids = [j for j in range(dataset.N_TASKS)]
 elif args.eval.type == 'curr':
 eval_tids = [t]
@@ -111,15 +113,14 @@

 kfgt = []
 # memorize current task acc
-results['knn-cls-each-acc'].append(knn_acc_list[-1])
-results['knn-cls-max-acc'].append(knn_acc_list[-1])
+results['knn-cls-each-acc'].append(knn_acc_list)
 # memorize max accuracy
+results['knn-cls-max-acc'] = [max(item) for item in zip_longest(*results['knn-cls-each-acc'], fillvalue=0)][:t]
 for j in range(t):
-if knn_acc_list[j] > results['knn-cls-max-acc'][j]:
-results['knn-cls-max-acc'][j] = knn_acc_list[j]
-kfgt.append(results['knn-cls-each-acc'][j] - knn_acc_list[j])
+kfgt.append(results['knn-cls-max-acc'][j] - knn_acc_list[j])
 results['knn-cls-acc'].append(np.mean(knn_acc_list))
-results['knn-cls-fgt'].append(np.mean(kfgt))
+if len(kfgt) > 0:
+results['knn-cls-fgt'].append(np.mean(kfgt))

 model_path = os.path.join(args.ckpt_dir, f"{args.model.cl_model}_{args.name}_{t}.pth")
 torch.save({
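The torch.save call above is truncated in this view, so the exact contents of the saved dict are not visible. Purely as a hypothetical usage sketch (the directory, model name, and dict keys are assumptions, not taken from this diff), loading one of these per-task checkpoints later might look like:

# Hypothetical sketch of re-loading a per-task checkpoint written by main.py.
# The values below are assumptions for illustration; inspect state.keys()
# to see what was actually stored before restoring a model.
import os
import torch

ckpt_dir = "./checkpoints"                   # assumed args.ckpt_dir
cl_model = "finetune"                        # assumed args.model.cl_model
run_name = "barlow-c100-experiment-resnet18" # assumed args.name
task_id = 2                                  # task index t

ckpt_path = os.path.join(ckpt_dir, f"{cl_model}_{run_name}_{task_id}.pth")
state = torch.load(ckpt_path, map_location="cpu")
print(sorted(state.keys()))                  # see which entries were saved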