test_single_image.py
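"""
Evaluate a trained PET (Point-quEry Transformer) crowd-counting model on a
single image, print the number of predicted points, and optionally save a
visualization of the predictions and the query split map.
"""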
import argparse
import os

import cv2
import numpy as np
import torch
import torchvision.transforms as standard_transforms
from PIL import Image

import util.misc as utils
from models import build_model


def get_args_parser():
    parser = argparse.ArgumentParser('Set Point Query Transformer', add_help=False)

    # model parameters
    # - backbone
    parser.add_argument('--backbone', default='vgg16_bn', type=str,
                        help="Name of the convolutional backbone to use")
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned', 'fourier'),
                        help="Type of positional embedding to use on top of the image features")
    # - transformer
    parser.add_argument('--dec_layers', default=2, type=int,
                        help="Number of decoding layers in the transformer")
    parser.add_argument('--dim_feedforward', default=512, type=int,
                        help="Intermediate size of the feedforward layers in the transformer blocks")
    parser.add_argument('--hidden_dim', default=256, type=int,
                        help="Size of the embeddings (dimension of the transformer)")
    parser.add_argument('--dropout', default=0.0, type=float,
                        help="Dropout applied in the transformer")
    parser.add_argument('--nheads', default=8, type=int,
                        help="Number of attention heads inside the transformer's attentions")

    # loss parameters
    # - matcher
    parser.add_argument('--set_cost_class', default=1, type=float,
                        help="Class coefficient in the matching cost")
    parser.add_argument('--set_cost_point', default=0.05, type=float,
                        help="SmoothL1 point coefficient in the matching cost")
    # - loss coefficients
    parser.add_argument('--ce_loss_coef', default=1.0, type=float)     # classification loss coefficient
    parser.add_argument('--point_loss_coef', default=5.0, type=float)  # regression loss coefficient
    parser.add_argument('--eos_coef', default=0.5, type=float,
                        help="Relative classification weight of the no-object class")  # cross-entropy weights

    # dataset parameters
    parser.add_argument('--dataset_file', default="SHA")
    parser.add_argument('--data_path', default="./data/ShanghaiTech/PartA", type=str)

    # misc parameters
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--vis_dir', default="")
    parser.add_argument('--num_workers', default=2, type=int)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser


class DeNormalize(object):
    """Invert a torchvision Normalize transform (operates in place)."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        for t, m, s in zip(tensor, self.mean, self.std):
            t.mul_(s).add_(m)
        return tensor


def visualization(samples, pred, vis_dir, img_path, split_map=None):
    """
    Visualize predictions
    """
    pil_to_tensor = standard_transforms.ToTensor()
    restore_transform = standard_transforms.Compose([
        DeNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        standard_transforms.ToPILImage()
    ])

    images = samples.tensors
    masks = samples.mask
    for idx in range(images.shape[0]):
        sample = restore_transform(images[idx])
        sample = pil_to_tensor(sample.convert('RGB')).numpy() * 255
        sample_vis = sample.transpose([1, 2, 0])[:, :, ::-1].astype(np.uint8).copy()

        # draw predictions (green); points are stored as (y, x), cv2 expects (x, y)
        size = 3
        for p in pred[idx]:
            sample_vis = cv2.circle(sample_vis, (int(p[1]), int(p[0])), size, (0, 255, 0), -1)

        # overlay split map
        if split_map is not None:
            imgH, imgW = sample_vis.shape[:2]
            split_map = (split_map * 255).astype(np.uint8)
            split_map = cv2.applyColorMap(split_map, cv2.COLORMAP_JET)
            split_map = cv2.resize(split_map, (imgW, imgH), interpolation=cv2.INTER_NEAREST)
            # clip back to uint8 so cv2.imwrite gets a valid image
            sample_vis = np.clip(split_map * 0.9 + sample_vis, 0, 255).astype(np.uint8)

        # save image
        if vis_dir is not None:
            # crop away the invalid (padded) area
            valid_area = torch.where(~masks[idx])
            valid_h, valid_w = valid_area[0][-1], valid_area[1][-1]
            sample_vis = sample_vis[:valid_h + 1, :valid_w + 1]

            name = os.path.splitext(os.path.basename(img_path))[0]
            img_save_path = os.path.join(vis_dir, '{}_pred{}.jpg'.format(name, len(pred[idx])))
            cv2.imwrite(img_save_path, sample_vis)
            print('image saved to', img_save_path)


@torch.no_grad()
def evaluate_single_image(model, img_path, device, vis_dir=None):
    model.eval()
    if vis_dir is not None:
        os.makedirs(vis_dir, exist_ok=True)

    # load image (BGR -> RGB)
    img = cv2.imread(img_path)
    if img is None:
        raise FileNotFoundError('cannot read image: {}'.format(img_path))
    img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

    # transform image
    transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225]),
    ])
    img = transform(img)
    samples = utils.nested_tensor_from_tensor_list([img])
    samples = samples.to(device)
    img_h, img_w = samples.tensors.shape[-2:]

    # inference
    outputs = model(samples, test=True)
    raw_scores = torch.nn.functional.softmax(outputs['pred_logits'], -1)
    outputs_scores = raw_scores[:, :, 1][0]
    outputs_points = outputs['pred_points'][0]
    print('predicted count:', len(outputs_scores))

    # visualize predictions
    if vis_dir:
        points = [[point[0] * img_h, point[1] * img_w] for point in outputs_points]  # recover absolute coordinates
        split_map = (outputs['split_map_raw'][0].detach().cpu().squeeze(0) > 0.5).float().numpy()
        visualization(samples, [points], vis_dir, img_path, split_map=split_map)
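

def evaluate_image_dir(model, img_dir, device, vis_dir=None):
    """Minimal convenience sketch: apply evaluate_single_image to a folder.

    Assumes img_dir (a hypothetical path) holds images readable by
    cv2.imread; other files are skipped by extension.
    """
    for fname in sorted(os.listdir(img_dir)):
        if fname.lower().endswith(('.jpg', '.jpeg', '.png')):
            evaluate_single_image(model, os.path.join(img_dir, fname), device, vis_dir=vis_dir)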


def main(args):
    # input image and model (edit these placeholder paths before running)
    args.img_path = 'your_image_path'
    args.resume = 'your_model_path'
    args.vis_dir = ''

    # build model
    device = torch.device(args.device)
    model, criterion = build_model(args)
    model.to(device)

    # load pretrained model
    checkpoint = torch.load(args.resume, map_location='cpu')
    model.load_state_dict(checkpoint['model'])

    # evaluation
    vis_dir = None if args.vis_dir == "" else args.vis_dir
    evaluate_single_image(model, args.img_path, device, vis_dir=vis_dir)


if __name__ == '__main__':
    parser = argparse.ArgumentParser('PET evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    main(args)
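
# To run: set args.img_path, args.resume, and (optionally) args.vis_dir in
# main() above (they are placeholders that override any CLI values), then:
#   python test_single_image.py --device cuda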