-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathtest.py
More file actions
executable file
·124 lines (101 loc) · 4.33 KB
/
test.py
File metadata and controls
executable file
·124 lines (101 loc) · 4.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
import os
import sys
sys.path.append(os.getcwd())
import argparse
import numpy as np
import random
import torch
from datasets import create_dataset
from models import create_model
from torch.utils.data import DataLoader
from tqdm import tqdm
def set_random_seed(seed):
    """Seed every RNG the pipeline touches so runs are reproducible."""
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    # Force deterministic cuDNN kernel selection (reproducible, possibly slower).
    torch.backends.cudnn.deterministic = True
def get_args_parser(argv=None):
    """Parse the experiment configuration from the command line.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv[1:]`` exactly as before;
            passing an explicit list makes the parser usable from tests
            and notebooks without touching the real command line.

    Returns:
        argparse.Namespace with every option below. When ``--debug`` is
        given, ``work_dir``/``num_workers``/``batch_size`` are overridden
        for quick local runs.
    """
    parser = argparse.ArgumentParser('Set config')
    # Data / model shape.
    parser.add_argument('--dataset', default='', type=str)
    parser.add_argument('--img_size', default=384, type=int)
    parser.add_argument('--max_obj_num', default=100, type=int)
    parser.add_argument('--max_lang_num', default=100, type=int)
    parser.add_argument('--num_queries', default=256, type=int)
    parser.add_argument('--num_decoder_layers', default=6, type=int)
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--frame_num', default=3, type=int)
    # NOTE(review): default=True combined with action='store_true' means this
    # flag is always True and cannot be switched off from the CLI — confirm
    # that is intended (left unchanged to preserve behavior).
    parser.add_argument('--dynamic', default=True, action='store_true')
    # Optimization schedule.
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--lr_backbone', default=1e-3, type=float)
    parser.add_argument('--text_encoder_lr', default=1e-5, type=float)
    parser.add_argument('--lr_step', default=[45, 80], type=int, nargs='+')
    parser.add_argument('--warmup-epoch', type=int, default=-1)
    # Bookkeeping.
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--val_epoch', default=1, type=int)
    parser.add_argument('--verbose_step', default=10, type=int)
    parser.add_argument('--pretrain', default='', type=str)
    parser.add_argument('--work_dir', default='outputs/debug', type=str)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--butd', action='store_true')
    args = parser.parse_args(argv)
    if args.debug:
        # Debug mode: scratch output dir, in-process data loading, tiny batch.
        args.work_dir = "debug"
        args.num_workers = 0
        args.batch_size = 2
    return args
@torch.no_grad()
def evaluate(args, model, dataset, dataloader):
    """Evaluate grounding accuracy: pick one predicted box per sample and
    score the stacked predictions with the dataset's own metric.

    Fix vs. original: the dead ``loss`` accumulator (initialized to 0, never
    updated, then divided by ``len(dataloader)``) is removed — it computed
    nothing and would raise ZeroDivisionError on an empty loader.

    Args:
        args: parsed CLI namespace (kept for signature parity; not read here).
        model: grounding model; ``model(input_data)`` must return a dict with
            'last_center', 'last_pred_size', 'proj_tokens',
            'last_proj_queries' and 'tokenized' entries.
        dataset: provides ``evaluate(pred_boxes)`` returning
            (acc@0.25, acc@0.5, mean IoU).
        dataloader: iterable of input dicts; tensor values are moved to GPU.
    """
    model.eval()
    total_predict_boxes = []
    for input_data in tqdm(dataloader, colour='red', unit=' data'):
        # Move tensor inputs to GPU; leave non-tensor entries untouched.
        for key in input_data:
            if isinstance(input_data[key], torch.Tensor):
                input_data[key] = input_data[key].cuda()
        end_points = model(input_data)
        # Expose inputs alongside outputs without clobbering model outputs.
        for key in input_data:
            if key not in end_points:
                end_points[key] = input_data[key]
        # Assemble (center, size) into per-query box predictions.
        pred_center = end_points['last_center'].detach().cpu()
        pred_size = end_points["last_pred_size"].detach().cpu()
        pred_boxes = torch.concat([pred_center, pred_size], dim=-1).numpy()
        # Query-token similarity, temperature-scaled softmax over tokens.
        proj_tokens = end_points['proj_tokens']  # (B, tokens, 64)
        proj_queries = end_points['last_proj_queries']  # (B, Q, 64)
        sem_scores = torch.matmul(proj_queries, proj_tokens.transpose(-1, -2))
        sem_scores_ = sem_scores / 0.07  # (B, Q, tokens)
        sem_scores = torch.softmax(sem_scores_, dim=-1)
        token = end_points['tokenized']
        mask = token['attention_mask'].detach().cpu()
        # Position of the last real token (skipping the trailing special token).
        last_pos = mask.sum(1) - 2
        bs = sem_scores.shape[0]
        pred_box = np.zeros((bs, 7))
        for i in range(bs):
            # NOTE(review): argmax of (1 - score) selects the query LEAST
            # aligned with the last token — confirm this inversion is
            # intended (left unchanged to preserve behavior).
            sim = 1 - sem_scores[i, :, last_pos[i]]
            max_idx = torch.argmax(sim)
            box = pred_boxes[i, max_idx.item()]
            pred_box[i, :6] = box  # 7th slot (heading) stays zero
        total_predict_boxes.append(pred_box)
    predict_boxes = np.vstack(total_predict_boxes)
    acc25, acc50, m_iou = dataset.evaluate(predict_boxes)
    info = f"Acc25={acc25} Acc50={acc50} mIoU={m_iou}"
    print(info)
def main(args):
    """Entry point: build the test split, restore weights, and evaluate."""
    set_random_seed(args.seed)

    print("Create Dataset")
    test_dataset = create_dataset(args, 'test')
    test_loader = DataLoader(
        test_dataset,
        args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        generator=torch.Generator(),
    )

    print("Create Model")
    model = create_model(args)
    checkpoint = torch.load(args.pretrain, map_location='cpu')
    model.load_state_dict(checkpoint['model'], strict=True)
    model.cuda()

    evaluate(args, model, test_dataset, test_loader)
if __name__ == '__main__':
    # Parse CLI config and run evaluation.
    main(get_args_parser())