Commit 9c20ee2
change formatting, compatibility
Xuanqing Liu committed Aug 3, 2018 · 1 parent 79f2ba7
Showing 16 changed files with 69 additions and 109 deletions.
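
The "compatibility" half of this commit is two mechanical migrations applied throughout the files below: since PyTorch 0.4 the in-place nn.init routines carry a trailing underscore (the old spellings such as nn.init.xavier_uniform are deprecated), and str.format() calls become Python 3.6+ f-strings. A minimal sketch of both patterns, using a hypothetical layer purely for illustration:

    import torch.nn as nn

    layer = nn.Linear(128, 10)  # hypothetical layer, for illustration only
    nn.init.xavier_uniform_(layer.weight, gain=1.0)   # PyTorch >= 0.4 in-place API
    # nn.init.xavier_uniform(layer.weight, gain=1.0)  # pre-0.4 spelling, deprecated

    name = "resnet_64"
    print(f"Unknown model name: {name}")              # f-string, Python >= 3.6
    # print("Unknown model name: {}".format(name))    # older equivalent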
1 change: 1 addition & 0 deletions .gitignore
@@ -2,3 +2,4 @@
 __pycache__
 *.pyc
 *.png
+error.txt
6 changes: 3 additions & 3 deletions acc_under_attack.py
@@ -39,7 +39,7 @@ def load_model():
         from dis_models.resnet import ResNetAC
         dis = ResNetAC(ch=opt.ndf, n_classes=opt.nclass)
     else:
-        raise ValueError("Unknown model name: {}".format(opt.model))
+        raise ValueError(f"Unknown model name: {opt.model}")
     if opt.ngpu > 0:
         dis = dis.cuda()
         dis = torch.nn.DataParallel(dis, device_ids=range(opt.ngpu))
@@ -65,7 +65,7 @@ def make_dataset():
         data = CIFAR10(root=opt.root, train=True, download=False, transform=trans)
         loader = DataLoader(data, batch_size=100, shuffle=True, num_workers=opt.workers)
     else:
-        raise ValueError("Unknown dataset: {}".format(opt.dataset))
+        raise ValueError(f"Unknown dataset: {opt.dataset}")
     return loader


@@ -91,7 +91,7 @@ def main(epsilon):
         label_correct = idx.eq(y_real)
         correct_label += torch.sum(label_correct)
         total += y_real.numel()
-        print('{}, {}'.format(epsilon, correct_label / total))
+        print(f'{epsilon}, {correct_label/total}')
 
 if __name__ == "__main__":
     print('#c, accuracy')
20 changes: 5 additions & 15 deletions dis_models/preact_resnet.py
@@ -15,23 +15,19 @@ class PreActBlock(nn.Module):
 
     def __init__(self, in_planes, planes, stride=1):
         super(PreActBlock, self).__init__()
-        #self.bn1 = nn.BatchNorm2d(in_planes)
         self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
-        nn.init.kaiming_normal(self.conv1.weight, mode='fan_out')
-        #self.bn2 = nn.BatchNorm2d(planes)
+        nn.init.kaiming_normal_(self.conv1.weight, mode='fan_out')
         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
-        nn.init.kaiming_normal(self.conv2.weight, mode='fan_out')
+        nn.init.kaiming_normal_(self.conv2.weight, mode='fan_out')
         if stride != 1 or in_planes != self.expansion*planes:
             self.shortcut = nn.Sequential(
                 nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
             )
 
     def forward(self, x):
-        #out = F.relu(self.bn1(x))
         out = F.leaky_relu(x, 0.2)
         shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
         out = self.conv1(out)
-        #out = self.conv2(F.relu(self.bn2(out)))
         out = self.conv2(F.leaky_relu(out, 0.2))
         out += shortcut
         return out
@@ -43,29 +39,23 @@ class PreActBottleneck(nn.Module):
 
     def __init__(self, in_planes, planes, stride=1):
         super(PreActBottleneck, self).__init__()
-        #self.bn1 = nn.BatchNorm2d(in_planes)
         self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
-        nn.init.kaiming_normal(self.conv1.weight, mode='fan_out')
-        #self.bn2 = nn.BatchNorm2d(planes)
+        nn.init.kaiming_normal_(self.conv1.weight, mode='fan_out')
         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
-        nn.init.kaiming_normal(self.conv2.weight, mode='fan_out')
-        #self.bn3 = nn.BatchNorm2d(planes)
+        nn.init.kaiming_normal_(self.conv2.weight, mode='fan_out')
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
-        self.nn.kaiming_normal(self.conv3.weight, mode='fan_out')
+        nn.init.kaiming_normal_(self.conv3.weight, mode='fan_out')
 
         if stride != 1 or in_planes != self.expansion*planes:
             self.shortcut = nn.Sequential(
                 nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
             )
 
     def forward(self, x):
-        #out = F.relu(self.bn1(x))
         out = F.leaky_relu(x, 0.2)
         shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
         out = self.conv1(out)
-        #out = self.conv2(F.relu(self.bn2(out)))
         out = self.conv2(F.leaky_relu(out, 0.2))
-        #out = self.conv3(F.relu(self.bn3(out)))
         out = self.conv3(F.leaky_relu(out, 0.2))
         out += shortcut
         return out
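
Note on the deletions above: the commented-out lines were remnants of the standard BatchNorm-based pre-activation design. In this variant each block applies F.leaky_relu(x, 0.2) directly before its convolutions, so the residual branch runs activation -> conv -> activation -> conv with no normalization layers. (The conv3 initializer also reads self.nn.kaiming_normal in the upstream source, which would raise an AttributeError; it is shown here as nn.init.kaiming_normal_, the call the surrounding lines use.)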
4 changes: 2 additions & 2 deletions dis_models/resnet.py
@@ -14,10 +14,10 @@ def __init__(self, ch=64, n_classes=0, activation=F.relu, bn=False):
         self.block5 = Block(ch * 8, ch * 16, activation=activation, downsample=True, bn=bn)
         self.block6 = Block(ch * 16, ch * 16, activation=activation, downsample=False, bn=bn)
         self.l7 = nn.Linear(ch * 16, 1)
-        nn.init.xavier_uniform(self.l7.weight, gain=1.0)
+        nn.init.xavier_uniform_(self.l7.weight, gain=1.0)
         if n_classes > 0:
             self.l_y = nn.Linear(ch * 16, n_classes)
-            nn.init.xavier_uniform(self.l_y.weight, gain=1.0)
+            nn.init.xavier_uniform_(self.l_y.weight, gain=1.0)
 
     def forward(self, x):
         h = x
4 changes: 2 additions & 2 deletions dis_models/resnet_32.py
@@ -12,10 +12,10 @@ def __init__(self, ch=64, n_classes=0, activation=F.relu, bn=False):
         self.block3 = Block(ch * 2, ch * 2, activation=activation, downsample=False, bn=bn)
         self.block4 = Block(ch * 2, ch * 2, activation=activation, downsample=False, bn=bn)
         self.l5 = nn.Linear(ch * 2, 1)
-        nn.init.xavier_uniform(self.l5.weight, gain=1.0)
+        nn.init.xavier_uniform_(self.l5.weight, gain=1.0)
         if n_classes > 0:
             self.l_y = nn.Linear(ch * 2, n_classes)
-            nn.init.xavier_uniform(self.l_y.weight, gain=1.0)
+            nn.init.xavier_uniform_(self.l_y.weight, gain=1.0)
 
     def forward(self, x):
         h = x
4 changes: 2 additions & 2 deletions dis_models/resnet_64.py
@@ -13,10 +13,10 @@ def __init__(self, ch=64, n_classes=0, activation=F.relu):
         self.block4 = Block(ch * 4, ch * 8, activation=activation, downsample=True)
         self.block5 = Block(ch * 8, ch * 16, activation=activation, downsample=True)
         self.l6 = nn.Linear(ch * 16, 1)
-        nn.init.xavier_uniform(self.l6.weight, gain=1.0)
+        nn.init.xavier_uniform_(self.l6.weight, gain=1.0)
         if n_classes > 0:
             self.l_y = nn.Linear(ch * 16, n_classes)
-            nn.init.xavier_uniform(self.l_y.weight, gain=1.0)
+            nn.init.xavier_uniform_(self.l_y.weight, gain=1.0)
     def forward(self, x):
         h = x
         h = self.block1(h)
28 changes: 0 additions & 28 deletions error.txt

This file was deleted.

10 changes: 6 additions & 4 deletions eval_inception.py
@@ -35,7 +35,7 @@ def load_model():
         from gen_models.resnet_small import ResNetGenerator
         gen = ResNetGenerator(ch=opt.ngf, dim_z=opt.nz, bottom_width=opt.start_width, n_classes=opt.nclass)
     else:
-        raise ValueError("Unknown model name: {}".format(opt.model))
+        raise ValueError(f"Unknown model name: {opt.model}")
     if opt.ngpu > 0:
         gen = gen.cuda()
         gen = torch.nn.DataParallel(gen, device_ids=range(opt.ngpu))
@@ -47,7 +47,8 @@ def load_model():
 def load_inception():
     inception_model = inception_v3(pretrained=True, transform_input=False)
     inception_model.cuda()
-    inception_model = torch.nn.DataParallel(inception_model, device_ids=range(opt.ngpu))
+    inception_model = torch.nn.DataParallel(inception_model, \
+                                            device_ids=range(opt.ngpu))
     inception_model.eval()
     return inception_model

@@ -76,12 +77,13 @@ def gen_imgs():
 def calc_inception():
     imgs, resize = gen_imgs()
     model = load_inception()
-    mean_score, std_score = score(model, imgs, opt.batch_size, resize, opt.splits)
+    mean_score, std_score = score(model, imgs, opt.batch_size, \
+                                  resize, opt.splits)
     return mean_score, std_score
 
 def main():
     mean, std = calc_inception()
-    print("Mean: {}, Std: {}".format(mean, std))
+    print(f"Mean: {mean}, Std: {std}")
 
 if __name__ == "__main__":
     main()
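
These hunks only rewrap long calls with line continuations and switch the final report to an f-string; the evaluation itself is unchanged: a pretrained inception_v3 is wrapped in DataParallel across opt.ngpu GPUs, and score() returns the mean and standard deviation of the Inception Score over opt.splits splits of the generated images.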
10 changes: 5 additions & 5 deletions eval_inception.sh
@@ -1,15 +1,15 @@
 #!/bin/bash
 
-for i in {120..200}; do
-    CUDA_VISIBLE_DEVICES=0,1,4,5 python ./eval_inception.py \
+for i in {80..175}; do
+    CUDA_VISIBLE_DEVICES=1,2,3,4,5 python ./eval_inception.py \
         --model resnet_64 \
-        --model_in ./ckpt.adv-0.64px-acloss/gen_epoch_$i.pth \
+        --model_in ./ckpt.adv-5.64px-acloss/gen_epoch_$i.pth \
         --nz 128 \
         --ngf 64 \
         --nclass 143 \
         --nimgs 50000 \
-        --batch_size 100 \
+        --batch_size 200 \
         --start_width 4 \
         --splits 10 \
-        --ngpu 4 2>error.txt
+        --ngpu 5 2>error.txt
 done
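
For context: each iteration of this loop redirects stderr to error.txt (2>error.txt), which explains the other housekeeping in this commit — error.txt is deleted from the repository and added to .gitignore so the scratch log is no longer tracked.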
8 changes: 4 additions & 4 deletions finetune.py
@@ -57,15 +57,15 @@ def load_models():
         gen = ResNetGenerator(ch=opt.ngf, dim_z=opt.nz, bottom_width=opt.start_width, n_classes=opt.nclass)
         dis = ResNetAC(ch=opt.ndf, n_classes=opt.nclass)
     else:
-        raise ValueError("Unknown model name: {}".format(opt.model))
+        raise ValueError(f"Unknown model name: {opt.model}")
     if opt.ngpu > 0:
         gen, dis = gen.cuda(), dis.cuda()
         gen, dis = torch.nn.DataParallel(gen, device_ids=range(opt.ngpu)), \
                    torch.nn.DataParallel(dis, device_ids=range(opt.ngpu))
     else:
         raise ValueError("Must run on gpus, ngpu > 0")
     gen.load_state_dict(torch.load(opt.netG))
-    #dis.load_state_dict(torch.load(opt.netD))
+    dis.load_state_dict(torch.load(opt.netD))
     return gen, dis
 
 def make_dataset():
@@ -79,7 +79,7 @@ def make_dataset():
         data = CIFAR10(root=opt.root, train=True, download=False, transform=trans)
         data_test = CIFAR10(root=opt.root, train=False, download=False, transform=trans)
         loader = DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
-        loader_test = DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
+        loader_test = DataLoader(data_test, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
     elif opt.dataset == "dog_and_cat_64":
         trans = tfs.Compose([
             tfs.RandomResizedCrop(opt.img_width, scale=(0.8, 0.9), ratio=(1.0, 1.0)),
@@ -112,7 +112,7 @@ def make_dataset():
         data = ImageFolder(opt.root, transform=trans)
         loader = DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
     else:
-        raise ValueError("Unknown dataset: {}".format(opt.dataset))
+        raise ValueError(f"Unknown dataset: {opt.dataset}")
     return loader, loader_test
 
 def test_acc(loader_test, dis):
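
The second hunk above is the one substantive bug fix in this file: loader_test previously wrapped data (the training split), so accuracy reported on the "test" loader was actually measured on training data. A minimal sketch of the corrected pairing, assuming the opt options and trans transform defined elsewhere in the file:

    from torch.utils.data import DataLoader
    from torchvision.datasets import CIFAR10

    data = CIFAR10(root=opt.root, train=True, download=False, transform=trans)
    data_test = CIFAR10(root=opt.root, train=False, download=False, transform=trans)
    loader = DataLoader(data, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
    # was DataLoader(data, ...): test accuracy silently reused the training set
    loader_test = DataLoader(data_test, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)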
6 changes: 3 additions & 3 deletions gen_models/resnet.py
@@ -13,16 +13,16 @@ def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=F.relu, \
         self.dim_z = dim_z
         self.n_classes = n_classes
         self.l1 = nn.Linear(dim_z, (bottom_width ** 2) * ch * 16)
-        nn.init.xavier_uniform(self.l1.weight, 1.0)
+        nn.init.xavier_uniform_(self.l1.weight, 1.0)
         self.block2 = Block(ch * 16, ch * 16, activation=activation, upsample=True, n_classes=n_classes)
         self.block3 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
         self.block4 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
         self.block5 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
         self.block6 = Block(ch * 2, ch * 1, activation=activation, upsample=True, n_classes=n_classes)
         self.b7 = nn.BatchNorm2d(ch)
-        nn.init.constant(self.b7.weight, 1.0) #XXX this is different from default initialization method
+        nn.init.constant_(self.b7.weight, 1.0) #XXX this is different from default initialization method
         self.l7 = nn.Conv2d(ch, 3, kernel_size=3, stride=1, padding=1)
-        nn.init.xavier_uniform(self.l7.weight, 1.0)
+        nn.init.xavier_uniform_(self.l7.weight, 1.0)
 
     def forward(self, z, y):
         h = z
6 changes: 3 additions & 3 deletions gen_models/resnet_32.py
@@ -13,15 +13,15 @@ def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=F.relu, \
         self.dim_z = dim_z
         self.n_classes = n_classes
         self.l1 = nn.Linear(dim_z, (bottom_width ** 2) * ch * 16)
-        nn.init.xavier_uniform(self.l1.weight, 1.0)
+        nn.init.xavier_uniform_(self.l1.weight, 1.0)
         self.block2 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
         self.block3 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
         self.block4 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
         self.block5 = Block(ch * 2, ch * 1, activation=activation, upsample=False, n_classes=n_classes)
         self.b6 = nn.BatchNorm2d(ch)
-        nn.init.constant(self.b6.weight, 1.0) #XXX this is different from default initialization method
+        nn.init.constant_(self.b6.weight, 1.0) #XXX this is different from default initialization method
         self.l6 = nn.Conv2d(ch, 3, kernel_size=3, stride=1, padding=1)
-        nn.init.xavier_uniform(self.l6.weight, 1.0)
+        nn.init.xavier_uniform_(self.l6.weight, 1.0)
 
     def forward(self, z, y):
         h = z
22 changes: 11 additions & 11 deletions gen_models/resnet_32_unsup.py
@@ -7,17 +7,17 @@ class UpResBlock(nn.Module):
     def __init__(self, ch):
         super(UpResBlock, self).__init__()
         self.c0 = nn.Conv2d(ch, ch, 3, 1, 1)
-        nn.init.normal(self.c0.weight, 0.02)
+        nn.init.normal_(self.c0.weight, 0.02)
         self.c1 = nn.Conv2d(ch, ch, 3, 1, 1)
-        nn.init.normal(self.c1.weight, 0.02)
+        nn.init.normal_(self.c1.weight, 0.02)
         self.cs = nn.Conv2d(ch, ch, 3, 1, 1)
-        nn.init.normal(self.cs.weight, 0.02)
+        nn.init.normal_(self.cs.weight, 0.02)
         self.bn0 = nn.BatchNorm2d(ch)
-        nn.init.constant(self.bn0.weight, 1.0)
-        nn.init.constant(self.bn0.bias, 0.0)
+        nn.init.constant_(self.bn0.weight, 1.0)
+        nn.init.constant_(self.bn0.bias, 0.0)
         self.bn1 = nn.BatchNorm2d(ch)
-        nn.init.constant(self.bn0.weight, 1.0)
-        nn.init.constant(self.bn0.bias, 0.0)
+        nn.init.constant_(self.bn1.weight, 1.0)
+        nn.init.constant_(self.bn1.bias, 0.0)
 
     @classmethod
     def upsample(cls, x):
@@ -37,15 +37,15 @@ def __init__(self, ch=64, dim_z=128, bottom_width=4):
         self.dim_z = dim_z
         self.ch = ch
         self.l0 = nn.Linear(dim_z, (bottom_width ** 2) * ch * 4)
-        nn.init.normal(self.l0.weight, math.sqrt(1.0 / dim_z))
+        nn.init.normal_(self.l0.weight, math.sqrt(1.0 / dim_z))
         self.r0 = UpResBlock(ch * 4)
         self.r1 = UpResBlock(ch * 4)
         self.r2 = UpResBlock(ch * 4)
         self.bn2 = nn.BatchNorm2d(ch * 4)
-        nn.init.constant(self.bn2.weight, 1.0)
-        nn.init.constant(self.bn2.bias, 0.0)
+        nn.init.constant_(self.bn2.weight, 1.0)
+        nn.init.constant_(self.bn2.bias, 0.0)
         self.c3 = nn.Conv2d(ch * 4, 3, 3, 1, 1)
-        nn.init.normal(self.c3.weight, 0.02)
+        nn.init.normal_(self.c3.weight, 0.02)
 
     def forward(self, x):
         h = F.relu(self.l0(x))
6 changes: 3 additions & 3 deletions gen_models/resnet_64.py
@@ -13,15 +13,15 @@ def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=F.relu, \
         self.dim_z = dim_z
         self.n_classes = n_classes
         self.l1 = nn.Linear(dim_z, (bottom_width ** 2) * ch * 16)
-        nn.init.xavier_uniform(self.l1.weight, 1.0)
+        nn.init.xavier_uniform_(self.l1.weight, 1.0)
         self.block2 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
         self.block3 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
         self.block4 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
         self.block5 = Block(ch * 2, ch * 1, activation=activation, upsample=True, n_classes=n_classes)
         self.b6 = nn.BatchNorm2d(ch)
-        nn.init.constant(self.b6.weight, 1.0) #XXX this is different from default initialization method
+        nn.init.constant_(self.b6.weight, 1.0) #XXX this is different from default initialization method
         self.l6 = nn.Conv2d(ch, 3, kernel_size=3, stride=1, padding=1)
-        nn.init.xavier_uniform(self.l6.weight, 1.0)
+        nn.init.xavier_uniform_(self.l6.weight, 1.0)
 
     def forward(self, z, y):
         h = z