CAM generated
Yorkbenno committed Sep 16, 2021
1 parent fb150d5 commit 4762e98
Showing 4 changed files with 21 additions and 14 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -6,4 +6,5 @@ __pycache__
 network/__pycache__/
 *.pth
 real_test.py
-modelstates/
+modelstates/
+out_cam/
5 changes: 3 additions & 2 deletions dataset.py
@@ -70,7 +70,8 @@ def online_cut_patches(im, im_size=56, stride=28):
         w_ = np.append(w_, w - im_size)
 
     for i in h_:
-        for j in w_:
-            im_list.append(im[i:i+im_size,j:j+im_size,:].copy())
+        for j in w_:
+            temp = Image.fromarray(np.uint8(im[i:i+im_size,j:j+im_size,:].copy()))
+            im_list.append(temp)
             position_list.append((i,j))
     return im_list, position_list
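
For context, this hunk changes the patch cutter to hand back PIL images instead of raw numpy slices, so torchvision transforms can be applied to each crop downstream. A minimal sketch of the full function under that reading; the grid and edge handling outside the visible hunk is assumed, not copied from the repository:

import numpy as np
from PIL import Image

def online_cut_patches(im, im_size=56, stride=28):
    # Cut an (H, W, C) uint8 array into overlapping im_size x im_size patches,
    # returning PIL images and their top-left (row, col) positions.
    im_list, position_list = [], []
    h, w = im.shape[0], im.shape[1]

    # Top-left corners on a stride grid, plus one flush with each edge so the
    # whole image is covered (assumed from the visible np.append line).
    h_ = np.arange(0, h - im_size + 1, stride)
    if (h - im_size) % stride != 0:
        h_ = np.append(h_, h - im_size)
    w_ = np.arange(0, w - im_size + 1, stride)
    if (w - im_size) % stride != 0:
        w_ = np.append(w_, w - im_size)

    for i in h_:
        for j in w_:
            # PIL instead of a raw array so transforms.Resize/ToTensor work
            # on each crop later.
            temp = Image.fromarray(np.uint8(im[i:i+im_size, j:j+im_size, :].copy()))
            im_list.append(temp)
            position_list.append((i, j))
    return im_list, position_list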
22 changes: 13 additions & 9 deletions generate_CAM.py
@@ -15,13 +15,14 @@
 side_length = 56
 out_cam = "./out_cam"
 net = network.ResNetCAM()
-path = "./model_last.pth"
+path = "./modelstates/model_last.pth"
 pretrained = torch.load(path)['model']
 # pretrained_modify = {k[7:] : v for k, v in pretrained.items()}
 # pretrained_modify['fc1.weight'] = pretrained_modify['fc1.weight'].unsqueeze(-1).unsqueeze(-1)
 # pretrained_modify['fc2.weight'] = pretrained_modify['fc2.weight'].unsqueeze(-1).unsqueeze(-1)
-pretrained['fc1.weight'] = pretrained['fc1.weight'].unsqueeze(-1).unsqueeze(-1)
-pretrained['fc2.weight'] = pretrained['fc2.weight'].unsqueeze(-1).unsqueeze(-1)
+pretrained['fc1.weight'] = pretrained['fc1.weight'].unsqueeze(-1).unsqueeze(-1).to(torch.float64)
+pretrained['fc2.weight'] = pretrained['fc2.weight'].unsqueeze(-1).unsqueeze(-1).to(torch.float64)
+# print(pretrained['fc2.bias'].type())
 
 net.load_state_dict(pretrained)
 print(f'Model loaded from {path} Successfully')
@@ -30,7 +31,7 @@
 net.eval()
 
 
-onlineDataset = dataset.OnlineDataset("/", transform=transforms.Compose([
+onlineDataset = dataset.OnlineDataset("./Dataset/2.validation/img", transform=transforms.Compose([
     transforms.Resize(224),
     transforms.ToTensor(),
     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))
@@ -40,22 +41,25 @@
 
 for im_path, im_list, position_list in tqdm(onlineDataloader):
     orig_img = np.asarray(Image.open(im_path[0]))
-    position_list = position_list[0]
+    # print(position_list)
+    # print(im_path)
+    # exit()
+    # position_list = position_list[0]
     def tocamlist(im):
         # create batch with size 1
         im = im.unsqueeze(0)
         im = im.cuda()
         cam_scores = net(im)
         # expected shape is batch_size * channel * h * w
-        cam_scores = F.interpolate(cam_scores, (side_length, side_length), mode='bilinear', align_corners=False)[0].numpy()
+        cam_scores = F.interpolate(cam_scores, (side_length, side_length), mode='bilinear', align_corners=False)[0].detach().cpu().numpy()
        return cam_scores
-    cam_list = map(tocamlist, im_list[0])
+    cam_list = list(map(tocamlist, im_list[0]))
 
     # merge crops
     sum_cam = np.zeros((3, orig_img.shape[0], orig_img.shape[1]))
     sum_counter = np.zeros_like(sum_cam)
     for i in range(len(cam_list)):
-        x, y = position_list[i]
+        y, x = position_list[i][0].item(), position_list[i][1].item()
         crop = cam_list[i]
         sum_cam[:, y:y+side_length, x:x+side_length] += crop
         sum_counter[:, y:y+side_length, x:x+side_length] += 1
@@ -70,4 +74,4 @@ def tocamlist(im):
     if out_cam is not None:
         if not os.path.exists(out_cam):
             os.makedirs(out_cam)
-        np.save(os.path.join(out_cam, im_path.split('/')[-1].split('.')[0] + '.npy'), norm_cam)
+        np.save(os.path.join(out_cam, im_path[0].split('/')[-1].split('.')[0] + '.npy'), norm_cam)
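
The diff collapses the averaging and normalization code between the last two hunks, so only the accumulation and the final save are visible. A hedged sketch of what that merge step plausibly does; merge_crops and the min-max normalization are illustrative assumptions, not the repository's exact code:

import numpy as np

def merge_crops(cam_list, position_list, shape, side_length=56):
    # Average overlapping per-crop CAM scores back onto the full image grid.
    sum_cam = np.zeros(shape)             # (channels, H, W) accumulated scores
    sum_counter = np.zeros_like(sum_cam)  # how many crops covered each pixel
    for crop, (y, x) in zip(cam_list, position_list):
        sum_cam[:, y:y+side_length, x:x+side_length] += crop
        sum_counter[:, y:y+side_length, x:x+side_length] += 1
    sum_counter[sum_counter < 1] = 1      # avoid division by zero at uncovered pixels
    avg_cam = sum_cam / sum_counter
    # Assumed normalization: rescale each class map to [0, 1] before saving.
    lo = avg_cam.min(axis=(1, 2), keepdims=True)
    hi = avg_cam.max(axis=(1, 2), keepdims=True)
    norm_cam = (avg_cam - lo) / (hi - lo + 1e-5)
    return norm_cam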
5 changes: 3 additions & 2 deletions network/resnet_cam.py
@@ -109,8 +109,8 @@ def __init__(self, block=Bottleneck, layers=[3, 4, 23, 3], base_channels=64):
         self.layer3 = self._make_layer(block, base_channels * 4, layers[2], dilation=2)
         self.layer4 = self._make_layer(block, base_channels * 8, layers[3], dilation=4)
 
-        self.fc1 = torch.nn.Conv2d(2048, 128, 1, stride=1, padding=0, dilation = 0, bias=True)
-        self.fc2 = torch.nn.Conv2d(128, 3, 1, stride=1, padding=0, dilation = 0, bias=True)
+        self.fc1 = torch.nn.Conv2d(2048, 128, 1, stride=1, padding=0, bias=True)
+        self.fc2 = torch.nn.Conv2d(128, 3, 1, stride=1, padding=0, bias=True)
         self.normalize = Normalize()
         self.not_training = []
 
@@ -138,6 +138,7 @@ def forward(self, x):
         x2 = self.layer2(x)
         x3 = self.layer3(x2)
         x4 = self.layer4(x3)
+        # print(x4.type())
 
         result = self.fc1(x4)
         result = self.fc2(result)
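
A note on how the resnet_cam.py head and the generate_CAM.py weight edits fit together: fc1/fc2 are 1x1 convolutions, so checkpoint weights saved in nn.Linear shape (out, in) must gain two trailing singleton dims to match Conv2d's (out, in, 1, 1), which is what the unsqueeze calls above do. A small illustration under that assumption; names below are made up for the example:

import torch

fc_weight = torch.randn(128, 2048)                   # trained nn.Linear-style weight
conv_weight = fc_weight.unsqueeze(-1).unsqueeze(-1)  # -> (128, 2048, 1, 1)

conv = torch.nn.Conv2d(2048, 128, 1, stride=1, padding=0, bias=True)
conv.weight.data.copy_(conv_weight)

# Applied over an (N, 2048, h, w) feature map, the 1x1 conv scores every
# spatial position, so the final fc2 output is a per-class activation map
# (the CAM) rather than a single logit vector.
features = torch.randn(1, 2048, 7, 7)
cam_scores = conv(features)                          # (1, 128, 7, 7)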
