"""Evaluation script for the ProtoPNet knowledge-distillation experiments.

For each teacher/student pairing (VGG19 -> VGG11 and VGG19 -> VGG16), loads
the teacher, a baseline student, and a KD student from argument files, then
computes accuracy, PMS, AAP, and AJS metrics and writes the results under
results/<experiment_name>/.
"""
import os

import numpy as np
import torch
import yaml

from reproduction.arguments import Arguments
from reproduction.lib.protopnet.helpers import makedir
from reproduction.wrapper import PPNetWrapper
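
# Note on the wrapper API (inferred from its usage in this file, not from its
# definition): PPNetWrapper is expected to provide
#   compute_indices_scores() -> populates the .indices_scores attribute with
#       test-patch indices and prototype similarity scores,
#   compute_accuracy(), compute_pms(), compute_aap(), compute_ajs().
# The exact semantics live in reproduction/wrapper.py.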


def main():
    device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
    print(f"Device: {device}")

    # VGG19 -> VGG11 experiment
    teacher = init_model('arguments/vgg19_teacher.yaml', device)
    baseline_student = init_model('arguments/vgg11_baseline.yaml', device)
    kd_student = init_model('arguments/vgg11_kd.yaml', device)
    run_experiment('vgg19_11', teacher, baseline_student, kd_student)

    # VGG19 -> VGG16 experiment
    teacher = init_model('arguments/vgg19_teacher.yaml', device)
    baseline_student = init_model('arguments/vgg16_baseline.yaml', device)
    kd_student = init_model('arguments/vgg16_kd.yaml', device)
    run_experiment('vgg19_16', teacher, baseline_student, kd_student)


def init_model(args_filename, device):
    """Initialize a model from the given argument file."""
    print(f"Initializing from {args_filename}.")
    args = Arguments(args_filename)
    model = PPNetWrapper(args, device)
    # Precompute test-patch indices and prototype similarity scores
    model.compute_indices_scores()
    return model


def run_experiment(experiment_name, teacher, baseline_student, kd_student):
    print(f'Running experiment: {experiment_name}')
    results_dir = os.path.join('results', experiment_name)
    makedir(results_dir)

    # Save indices of test image patches and their similarity scores with the corresponding prototypes
    save_indices_scores(results_dir, teacher, baseline_student, kd_student)

    # Compute accuracies
    teacher_accuracy = teacher.compute_accuracy()
    baseline_accuracy = baseline_student.compute_accuracy()
    kd_accuracy = kd_student.compute_accuracy()

    # Compute PMS scores and save student-teacher prototype matching indices
    baseline_pms, baseline_best_allocation = baseline_student.compute_pms(teacher.indices_scores)
    np.save(os.path.join(results_dir, 'baseline_best_allocation.npy'), baseline_best_allocation)
    kd_pms, kd_best_allocation = kd_student.compute_pms(teacher.indices_scores)
    np.save(os.path.join(results_dir, 'kd_best_allocation.npy'), kd_best_allocation)

    # For each distance threshold (used to define an active patch), compute AAP and AJS
    for dist_threshold in [0.01, 0.1, 0.2, 0.45, 1.0, 3.0, 5.0, None]:
        results = {
            'aap': {
                'teacher': teacher.compute_aap(dist_threshold),
                'baseline_student': baseline_student.compute_aap(dist_threshold),
                'kd_student': kd_student.compute_aap(dist_threshold)
            },
            'ajs': {
                'baseline_student': baseline_student.compute_ajs(dist_threshold, teacher.indices_scores),
                'kd_student': kd_student.compute_ajs(dist_threshold, teacher.indices_scores)
            },
            'accuracy': {
                'teacher': teacher_accuracy,
                'baseline_student': baseline_accuracy,
                'kd_student': kd_accuracy
            },
            'pms': {
                'baseline_student': baseline_pms,
                'kd_student': kd_pms
            }
        }

        # Save results for this threshold
        results_dist_dir = os.path.join(results_dir, str(dist_threshold))
        makedir(results_dist_dir)
        with open(os.path.join(results_dist_dir, 'metrics.yaml'), 'w') as file:
            yaml.dump(results, file)

    print(f'Finished experiment: {experiment_name}')
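

# Hypothetical convenience helper (an addition for illustration, not part of
# the original pipeline): reloads the metrics.yaml written by run_experiment
# for one experiment and one distance threshold. The defaults are examples
# taken from the runs in main() and the threshold list above.
def load_metrics(experiment_name='vgg19_11', dist_threshold=0.1):
    """Reload the metrics dict saved by run_experiment for a given threshold."""
    path = os.path.join('results', experiment_name, str(dist_threshold), 'metrics.yaml')
    with open(path) as file:
        return yaml.safe_load(file)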


def save_indices_scores(results_dir, teacher, baseline_student, kd_student):
    np.save(os.path.join(results_dir, 'teacher_indices_scores.npy'), teacher.indices_scores)
    np.save(os.path.join(results_dir, 'baseline_indices_scores.npy'), baseline_student.indices_scores)
    np.save(os.path.join(results_dir, 'kd_indices_scores.npy'), kd_student.indices_scores)


if __name__ == '__main__':
    main()
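
# Example usage sketch (illustrative paths, assuming the vgg19_11 run above
# has finished):
#
#     metrics = load_metrics('vgg19_11', 0.1)
#     print(metrics['aap']['kd_student'], metrics['pms']['kd_student'])
#     allocation = np.load('results/vgg19_11/kd_best_allocation.npy')
#     print(allocation.shape)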