test.py
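"""Standalone inference / evaluation script.

Loads the best checkpoint for the chosen dataset, runs the model over the
inference split, post-processes the predictions, and reports temporal action
detection metrics (per-IoU mAP, average mAP, AR@k, AUC) through the
dataset-specific evaluator in eval.tad_eval.
"""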
import logging
import os.path as osp

import torch

from eval import tad_eval

logger = logging.getLogger()


@torch.no_grad()
def test(model,
         criterion,
         postprocessor,
         data_loader,
         dataset_name,
         epoch,
         device,
         args,
         verbose=None):
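    """Run inference with `model` over `data_loader` and evaluate the detections.

    Summary inferred from the surrounding code: batch outputs are converted to
    per-video detections by `postprocessor` and scored by the dataset-specific
    evaluator over `iou_range`. Returns a dict of stats whose keys combine the
    metric name with the nms_mode, e.g. 'mAP_raw'.
    """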
    model.eval()
    criterion.eval()
    postprocessor.eval()

    # THUMOS14 is evaluated at IoU 0.3-0.7; other datasets use IoU 0.5:0.05:0.95.
    iou_range = ([0.3, 0.4, 0.5, 0.6, 0.7] if dataset_name == 'Thumos14'
                 else [num / 100 for num in range(50, 100, 5)])
    logger.info(f'iou range {iou_range} in dataset {dataset_name}.')
    if verbose is not None:
        print(f'iou range {iou_range} in dataset {dataset_name}.')

    # Build the dataset-specific evaluator, e.g. Thumos14Evaluator.
    action_evaluator = getattr(tad_eval, dataset_name + "Evaluator")(
        dataset_name=dataset_name,
        epoch=epoch,
        dataset=data_loader.dataset,
        iou_range=iou_range,
        nms_mode=['raw'],
        eval_proposal=args.eval_proposal,
        filter_threshold=args.filter_threshold,
    )
    count = 0
    total = len(data_loader.dataset)
    for samples, targets in data_loader:
        samples = samples.to(device)
        classes = data_loader.dataset.classes
        description_dict = data_loader.dataset.description_dict

        outputs = model(samples, classes, description_dict, targets, epoch)

        count += len(targets)
        logger.info(f"Inference Epoch: {epoch} ({count}/{total})")
        if verbose is not None:
            print(f"Inference Epoch: {epoch} ({count}/{total})")

        # Post-process the raw outputs into per-video detections.
        video_duration = torch.FloatTensor([t["video_duration"] for t in targets]).to(device)
        results = postprocessor(outputs, video_duration, args.eval_proposal)

        res = {target['video_name']: output for target, output in zip(targets, results)}
        if action_evaluator is not None:
            action_evaluator.update(res)
    # Accumulate predictions from all videos.
    if action_evaluator is not None:
        action_evaluator.synchronize_between_processes()
        action_evaluator.accumulate()
        # Dump detections to disk if requested.
        if args.save_result:
            save_path = osp.join('./results/', args.dataset_name, args.model_name,
                                 'detection_{}_' + str(epoch) + '_{}.json')
            action_evaluator.dump_detection(save_path)
        action_evaluator.summarize()
    # Summarize all stats for the logger.
    stats = {}
    stats['iou_range'] = iou_range
    stats['epoch'] = epoch
    if action_evaluator is not None:
        # Fuse the nms_mode into the key name, e.g. 'mAP' + '_raw' -> 'mAP_raw'.
        for k, v in action_evaluator.stats.items():
            for vk, vv in v.items():
                stats[vk + '_' + k] = vv

        mAP_values = ' '.join([f'{k}: {100 * v:.2f}' for k, v in stats.items() if k.startswith('mAP')])
        logger.info(mAP_values)

        stats['stats_summary'] = action_evaluator.stats_summary

    return stats


if __name__ == "__main__":
    import os

    from torch.utils.data import DataLoader

    import dataset
    import options
    from models.ConditionalDetr import build_model
    from options import merge_cfg_from_file
    from utils.misc import collate_fn
    from utils.util import setup_seed

    args = options.parser.parse_args()
    if args.cfg_path is not None:
        # NOTE: values from the YAML config file are merged last, so they take precedence.
        args = merge_cfg_from_file(args, args.cfg_path)
    device = torch.device(args.device)
    setup_seed(args.seed)
    # Load the dataset.
    print("Loading the dataset...")
    val_dataset = getattr(dataset, args.dataset_name + "Dataset")(subset='inference', mode='inference', args=args)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, collate_fn=collate_fn,
                            num_workers=args.num_workers, pin_memory=True, shuffle=False, drop_last=False)

    # Build the model and load the best checkpoint.
    print("Building the model...")
    model, criterion, postprocessor = build_model(args, device)
    ckpt_path = os.path.join("./ckpt", args.dataset_name, "best_" + args.model_name + ".pkl")
    model.load_state_dict(torch.load(ckpt_path, map_location=device))
    model.to(device)
print(f"Starting the inference...")
test_stats = test(model,criterion,postprocessor,val_loader,args.dataset_name,-1,device,args,verbose=True)
print('||'.join(['Intermediate map @ {} = {:.3f} '.format(test_stats['iou_range'][i],test_stats['per_iou_ap_raw'][i]*100) for i in range(len(test_stats['iou_range']))]))
print('Intermediate mAP Avg ALL: {}'.format(test_stats['mAP_raw']*100))
print('Intermediate AR@1: {}, AR@5: {}, AR@10: {}, AR@25: {}, AR@40: {}, AR@50: {}, AR@100: {}, AUC: {}'.format(test_stats['AR@1_raw']*100, test_stats['AR@5_raw']*100, test_stats['AR@10_raw']*100, test_stats['AR@25_raw']*100, test_stats['AR@40_raw']*100, test_stats['AR@50_raw']*100, test_stats['AR@100_raw']*100, test_stats['AUC_raw']*100))
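

# Example invocation (illustrative only: the actual flag names and defaults are
# defined in options.py, and the config path below is hypothetical):
#   python test.py --cfg_path config/Thumos14.yaml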