Add defense plot and train-process plot
lihebi committed Aug 29, 2019
1 parent 368940c commit d6cfa30
Showing 3 changed files with 157 additions and 20 deletions.
3 changes: 3 additions & 0 deletions AdvAE.org
@@ -122,6 +122,9 @@ Probably a figure for this.
| CW | | | | | | | |
| Hop (black-box) | | | | | | | |

*** Training process plot
Plot training loss, validation loss, and validation accuracy over epochs.
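A minimal sketch (not part of this commit) of how such a plot could be produced, assuming the per-epoch numbers come from a Keras History object: history.history is a dict whose key names ('loss', 'val_loss', 'val_accuracy' or 'val_acc') depend on the Keras version and compiled metrics. Note that parse.rkt below only recovers val_loss from stdout, so this is broader than what the repo currently extracts.

# Sketch only: plot train/val loss and val accuracy from a Keras History.
# `history` is assumed to be the return value of model.fit(...).
import matplotlib.pyplot as plt

def plot_history(history, out='images/train-process-sketch.pdf'):
    h = history.history
    fig, ax1 = plt.subplots(dpi=300)
    ax1.plot(h['loss'], 'o-', markersize=4, label='train loss')
    ax1.plot(h['val_loss'], '^-', markersize=4, label='val loss')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('Loss')
    ax2 = ax1.twinx()  # second y-axis for accuracy
    ax2.plot(h['val_accuracy'], 'x-', markersize=4, color='C2', label='val accuracy')
    ax2.set_ylabel('Accuracy')
    ax1.legend(fontsize='small', loc='upper left')
    ax2.legend(fontsize='small', loc='upper right')
    fig.savefig(out, bbox_inches='tight', pad_inches=0)
    plt.close(fig)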



* New TODOs
51 changes: 51 additions & 0 deletions parse.rkt
@@ -0,0 +1,51 @@
#lang racket

;; some utilities written in rkt

(require json)

(define (parse-train-process-from-stdout)
  ;; Three closures over shared mutable state:
  ;;   new-exp     -- start a new experiment record keyed by NAME
  ;;   collect-exp -- append one value (a per-epoch val_loss) to the current record
  ;;   get-result  -- return a hash mapping experiment name -> list of collected values
  (define-values (new-exp collect-exp get-result)
(let ([all '()]
[current '()])
(values
(λ (name)
(set! all (append all (list current)))
;; the key of jsexpr has to be a symbol, not string
(set! current (list (string->symbol name))))
(λ (x)
(set! current (append current (list x))))
(λ ()
(let ([lst (filter (λ (x) (> (length x) 1))
(append all (list current)))])
;; change this list to a hash table
(make-hash lst))))))
(call-with-input-file "stdout.txt"
(λ (in)
;; "====== Denoising training for saved_models/MNIST-mnistcnn-cnn3AE-C0_A2_0.5-AdvAE.hdf5 .."
;; "Trainng AdvAE .."
;; "Epoch 1/100"
;; "54000/54000 [==============================] - 117s 2ms/step - loss: 1.7462 - val_loss: 1.3441"
;; "{'advacc': 0.5018382352941176, 'acc': 0.8801804812834224, 'cnnacc': 0.9908088235294118, 'obliacc': 0.7984625668449198}"
;; "Epoch 2/100"
;; "Restoring model weights from the end of the best epoch"
(let ([lines (port->lines in)])
(for ([line lines])
          ;; one branch per known stdout line shape; branches with no body
          ;; merely recognize and skip those lines
          (cond
[(string-prefix? line "====== Denoising training for")
(new-exp (second (regexp-match #px"saved_models/(.*)\\.hdf5" line)))]
[(string-prefix? line "Trainng AdvAE ..")]
[(string-prefix? line "Epoch 1/100")]
[(string-prefix? line "54000/54000 [==============================]")
(collect-exp (string->number (second (regexp-match #px"val_loss: (\\d*\\.\\d*)" line))))]
[(string-prefix? line "{'advacc': ")]
[(string-prefix? line "Restoring model weights from the end of the best epoch")]
[else #f])))))
(get-result))

(module+ test
(jsexpr? (parse-train-process-from-stdout))
(call-with-output-file "images/train-process.json"
#:exists 'replace
(λ (out)
(write-json (parse-train-process-from-stdout) out))))
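
For reference, the hash built above serializes via write-json to a JSON object mapping each model name to its list of per-epoch val_loss values, which is the shape plot_train_process() in result.py indexes by key. A minimal sketch of inspecting that file (the loop body is illustrative only):

# Sketch: inspect images/train-process.json as written by parse.rkt.
# Expected shape: {"<model-name>": [val_loss_epoch_1, val_loss_epoch_2, ...], ...}
import json

with open('images/train-process.json') as fp:
    runs = json.load(fp)

for name, losses in runs.items():
    print('{}: {} epochs, final val_loss {:.4f}'.format(name, len(losses), losses[-1]))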
123 changes: 103 additions & 20 deletions result.py
@@ -291,6 +291,45 @@ def plot_onto_lambda():
plt.savefig('images/onto-lambda-epsilon-0.38.pdf', bbox_inches='tight', pad_inches=0)
plt.close(fig)

def plot_defense_onto_epsilon():
"HGD, ItAdv, AdvAE"

jfiles = ['images/test-result-MNIST-mnistcnn-cnn3AE-B2.json',
'images/test-result-MNIST-mnistcnn-cnn3AE-C0_A2_1.json',
# 'images/test-result-MNIST-mnistcnn-cnn3AE-ItAdv.json',
'images/test-result-MNIST-mnistcnn-identityAE-ItAdv.json']
lam_data = []
for fname in jfiles:
res = {}
with open(fname) as fp:
j = json.load(fp)
data = j['epsilon_exp_data']
res['eps'] = [d[0] for d in data]
res['FGSM'] = [d[1] for d in data]
res['PGD'] = [d[2] for d in data]
res['Hop'] = [d[3] for d in data]
lam_data.append(res)

colors = [(0.5, x, y) for x,y in zip(np.arange(0, 1, 1 / len(lam_data)),
np.arange(0, 1, 1 / len(lam_data)))]
lams = ['HGD', 'AdvAE',
# 'ItAdv-full',
'ItAdv']
for attack in ['FGSM', 'PGD', 'Hop']:
fig = plt.figure(dpi=300)
for lam, data, marker, color in zip(lams, lam_data,
['o-']*len(lams),
colors):
plt.plot(data['eps'], data[attack], marker, color=color,
markersize=4, label='{} {}'.format(attack, lam))
plt.xlabel('Distortion')
plt.ylabel('Accuracy')
plt.legend(fontsize='small')
plt.savefig('images/defense-onto-epsilon-{}.pdf'.format(attack),
bbox_inches='tight', pad_inches=0)
plt.close(fig)


def plot_lambda_onto_epsilon():
"""Plot lambda figure, for a selected epsilon setting."""
lams = [0, 0.2, 0.5, 1, 1.5, 2, 5]
@@ -307,27 +346,25 @@ def plot_lambda_onto_epsilon():
res['Hop'] = [d[3] for d in data]
lam_data.append(res)

fig = plt.figure(dpi=300)

colors = [(0.5, x, y) for x,y in zip(np.arange(0, 1, 1 / len(lams)),
np.arange(0, 1, 1 / len(lams)))]
# colors2 = [(0.2, x, y) for x,y in zip(range(0, 1, len(lams)),
# range(0, 1, len(lams)))]
for lam, data, marker, color in zip(lams, lam_data,
# ['x-', '^-', 'o-', 's-']*2,
['o-']*len(lams),
colors):
# plt.plot(data['eps'], data['FGSM'], marker, color=color,
# markersize=4, label='FGSM {}'.format(lam))
plt.plot(data['eps'], data['PGD'], marker, color=color,
                 markersize=4, label=r'PGD $\lambda$={}'.format(lam))
# plt.plot(data['eps'], data['Hop'], marker, color=color2,
# markersize=4, label='Hop {}'.format(lam))
plt.xlabel('Distortion')
plt.ylabel('Accuracy')
plt.legend(fontsize='small')
plt.savefig('images/lambda-onto-epsilon.pdf', bbox_inches='tight', pad_inches=0)
plt.close(fig)

for attack in ['FGSM', 'PGD', 'Hop']:
fig = plt.figure(dpi=300)
for lam, data, marker, color in zip(lams, lam_data,
# ['x-', '^-', 'o-', 's-']*2,
['o-']*len(lams),
colors):
plt.plot(data['eps'], data[attack], marker, color=color,
                     markersize=4, label=r'{} $\lambda$={}'.format(attack, lam))
plt.xlabel('Distortion')
plt.ylabel('Accuracy')
plt.legend(fontsize='small')
plt.savefig('images/lambda-onto-epsilon-{}.pdf'.format(attack),
bbox_inches='tight', pad_inches=0)
plt.close(fig)

def plot_aesize_onto_epsilon():
cols = []
@@ -359,11 +396,57 @@ def plot_aesize_onto_epsilon():
plt.legend(fontsize='small')
plt.savefig('images/aesize-onto-epsilon.pdf', bbox_inches='tight', pad_inches=0)
plt.close(fig)


def plot_train_process():
fname = 'images/train-process.json'
fig = plt.figure(dpi=300)
keys1 = [
# different defense
'MNIST-mnistcnn-cnn1AE-ItAdv-AdvAE',
'MNIST-mnistcnn-cnn3AE-B2-AdvAE',
'MNIST-mnistcnn-cnn3AE-ItAdv-AdvAE',
'MNIST-mnistcnn-identityAE-ItAdv-AdvAE',
]
keys2 = [
        # different AE
'MNIST-mnistcnn-cnn1AE-C0_A2_1-AdvAE',
'MNIST-mnistcnn-cnn2AE-C0_A2_1-AdvAE',
'MNIST-mnistcnn-cnn3AE-C0_A2_1-AdvAE',
'MNIST-mnistcnn-fcAE-C0_A2_1-AdvAE',
'MNIST-mnistcnn-deepfcAE-C0_A2_1-AdvAE',
]
keys3 = [
# different lambdas
# 'MNIST-mnistcnn-cnn3AE-C0_A2_0-AdvAE',
# 'MNIST-mnistcnn-cnn3AE-C0_A2_0.2-AdvAE',
'MNIST-mnistcnn-cnn3AE-C0_A2_0.5-AdvAE',
'MNIST-mnistcnn-cnn3AE-C0_A2_1-AdvAE',
'MNIST-mnistcnn-cnn3AE-C0_A2_1.5-AdvAE',
'MNIST-mnistcnn-cnn3AE-C0_A2_2-AdvAE',
'MNIST-mnistcnn-cnn3AE-C0_A2_5-AdvAE'
]

    keys = keys2  # select which group of models to plot: keys1/keys2/keys3

markers = ['o-', '*-', '+-', 'x-', '^-'] * 3
with open(fname) as fp:
j = json.load(fp)
colors = [(0.5, x, y) for x,y in zip(np.arange(0, 1, 1 / len(keys)),
np.arange(0, 1, 1 / len(keys)))]
for key, marker, color in zip(keys, markers, colors):
data = j[key]
plt.plot(data, marker, color=color,
markersize=4, label=key)
plt.xlabel('Epoch')
plt.ylabel('Val loss')
plt.legend(fontsize='small')
plt.savefig('images/train-process.pdf', bbox_inches='tight', pad_inches=0)
plt.close(fig)

def __test():
# this is the main lambda plot
plot_lambda_onto_epsilon()
plot_onto_lambda()
# plot_onto_lambda()
plot_aesize_onto_epsilon()

plot_defense_onto_epsilon()
plot_train_process()
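
As a usage note (a sketch, not part of the commit): assuming images/train-process.json has been produced by running the parse.rkt test submodule (e.g. raco test parse.rkt) and the test-result-*.json files exist under images/, the two new plots can be regenerated by calling the added functions. The import below assumes result.py is importable from the working directory and has no heavy module-level side effects.

# Hypothetical driver; assumes the JSON inputs referenced above are present.
import result

result.plot_defense_onto_epsilon()  # writes images/defense-onto-epsilon-{FGSM,PGD,Hop}.pdf
result.plot_train_process()         # writes images/train-process.pdf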
