
Closes #596 (make pylint ask for docstrings) (#983)

* Closes #596 (make pylint ask for docstrings)

* add docstrings

* fix some things that failed on travis but not my laptop

* fix pylint version

* disable pylint in one more file

* some new no-docstring problems introduced to main branch by recent PRs

* fix pylint argument

* don't update pylint

* things that fail in old pylint but not new pylint

* another new missing docstring

* this didn't fail before
goodfeli committed Feb 21, 2019
1 parent 36c5e32 commit 688fe64de5bda82895cc8729348a5d761c5e7813
Showing with 269 additions and 40 deletions.
  1. +1 −2 .pylintrc
  2. +7 −0 cleverhans/attack_bundling.py
  3. +3 −0 cleverhans/attacks/__init__.py
  4. +3 −0 cleverhans/attacks/basic_iterative_method.py
  5. +1 −0 cleverhans/attacks/carlini_wagner_l2.py
  6. +1 −0 cleverhans/attacks/deep_fool.py
  7. +1 −0 cleverhans/attacks/elastic_net_method.py
  8. +1 −0 cleverhans/attacks/fast_feature_adversaries.py
  9. +26 −27 cleverhans/attacks/lbfgs.py
  10. +3 −0 cleverhans/attacks/madry_et_al.py
  11. +12 −0 cleverhans/attacks/max_confidence.py
  12. +2 −0 cleverhans/attacks/momentum_iterative_method.py
  13. +2 −0 cleverhans/attacks/projected_gradient_descent.py
  14. +1 −0 cleverhans/attacks/saliency_map_method.py
  15. +38 −2 cleverhans/attacks/spsa.py
  16. +1 −0 cleverhans/attacks_tf.py
  17. +3 −0 cleverhans/canary.py
  18. +3 −0 cleverhans/compat.py
  19. +1 −0 cleverhans/dataset.py
  20. +2 −0 cleverhans/devtools/checks.py
  21. +1 −0 cleverhans/devtools/tests/test_format.py
  22. +1 −0 cleverhans/experimental/certification/certify.py
  23. +1 −0 cleverhans/experimental/certification/dual_formulation.py
  24. +1 −0 cleverhans/experimental/certification/optimization.py
  25. +1 −0 cleverhans/experimental/certification/tests/dual_formulation_test.py
  26. +1 −0 cleverhans/experimental/certification/tests/nn_test.py
  27. +4 −3 cleverhans/experimental/certification/tests/optimization_test.py
  28. +1 −0 cleverhans/experimental/certification/tests/utils_test.py
  29. +3 −0 cleverhans/initializers.py
  30. +1 −0 cleverhans/loss.py
  31. +4 −0 cleverhans/model_zoo/all_convolutional.py
  32. +1 −0 cleverhans/model_zoo/basic_cnn.py
  33. +1 −0 cleverhans/model_zoo/deep_k_nearest_neighbors/dknn.py
  34. +1 −0 cleverhans/model_zoo/soft_nearest_neighbor_loss/SNNL_regularized_model.py
  35. +1 −0 cleverhans/model_zoo/soft_nearest_neighbor_loss/SNNL_regularized_train.py
  36. +1 −0 cleverhans/picklable_model.py
  37. +7 −0 cleverhans/utils.py
  38. +1 −0 cleverhans/utils_mnist.py
  39. +5 −0 cleverhans/utils_tf.py
  40. +7 −1 cleverhans_tutorials/__init__.py
  41. +1 −0 cleverhans_tutorials/cifar10_tutorial_tf.py
  42. +1 −0 cleverhans_tutorials/evaluate_pickled_model.py
  43. +1 −0 cleverhans_tutorials/mnist_blackbox.py
  44. +1 −0 cleverhans_tutorials/mnist_tutorial_cw.py
  45. +1 −0 cleverhans_tutorials/mnist_tutorial_jsma.py
  46. +1 −0 cleverhans_tutorials/mnist_tutorial_keras.py
  47. +1 −0 cleverhans_tutorials/mnist_tutorial_keras_tf.py
  48. +4 −0 cleverhans_tutorials/mnist_tutorial_picklable.py
  49. +1 −0 cleverhans_tutorials/mnist_tutorial_pytorch.py
  50. +4 −0 cleverhans_tutorials/mnist_tutorial_tf.py
  51. +1 −0 cleverhans_tutorials/mnist_tutorial_tfe.py
  52. +1 −0 examples/imagenet_featadvs/model.py
  53. +1 −0 examples/multigpu_advtrain/attacks_multigpu.py
  54. +1 −0 examples/multigpu_advtrain/make_model.py
  55. +1 −1 examples/multigpu_advtrain/model.py
  56. +1 −0 examples/multigpu_advtrain/run_multigpu.py
  57. +1 −0 examples/multigpu_advtrain/runner.py
  58. +1 −0 examples/multigpu_advtrain/test_run_multigpu.py
  59. +1 −0 examples/multigpu_advtrain/test_runner.py
  60. +1 −0 examples/multigpu_advtrain/trainer.py
  61. +1 −0 examples/multigpu_advtrain/utils.py
  62. +1 −0 examples/multigpu_advtrain/utils_cifar.py
  63. +1 −0 examples/multigpu_advtrain/utils_svhn.py
  64. +1 −0 examples/test_imagenet_attacks.py
  65. +1 −0 tests_pytorch/test_mnist_tutorial_pytorch.py
  66. +9 −0 tests_tf/test_attack_bundling.py
  67. +8 −0 tests_tf/test_attacks.py
  68. +1 −0 tests_tf/test_attacks_tf.py
  69. +10 −3 tests_tf/test_confidence_report.py
  70. +3 −0 tests_tf/test_dataset.py
  71. +1 −0 tests_tf/test_defenses.py
  72. +1 −0 tests_tf/test_evaluation.py
  73. +1 −0 tests_tf/test_mnist_blackbox.py
  74. +1 −0 tests_tf/test_mnist_tutorial_cw.py
  75. +1 −0 tests_tf/test_mnist_tutorial_jsma.py
  76. +1 −0 tests_tf/test_mnist_tutorial_keras.py
  77. +1 −0 tests_tf/test_mnist_tutorial_keras_tf.py
  78. +1 −0 tests_tf/test_mnist_tutorial_tf.py
  79. +16 −0 tests_tf/test_model.py
  80. +20 −1 tests_tf/test_picklable_model.py
  81. +1 −0 tests_tf/test_projected_gradient_descent.py
  82. +3 −0 tests_tf/test_serial.py
  83. +1 −0 tests_tf/test_utils.py
  84. +1 −0 tests_tf/test_utils_keras.py
  85. +1 −0 tests_tf/test_utils_tf.py
@@ -17,7 +17,6 @@ msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'
# The type of the output of softmax_cross_entropy_with_logits is weird
# and pylint does not realize it is safe to negate it.
# C0103: Complains about the variable name 'x'
-# C0111: Missing docstring
# W0613: Unused argument
# This one is disabled because pylint does not implement it well.
# In many cases we use all arguments via `locals()` and pylint does not
@@ -98,7 +97,7 @@ msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'
# R1714: use x in [a,b] instead of x == a or x == b
# R0801: duplicate code across files
# (We have a lot of intentional duplication, like the tutorials)
-disable=C0103,C0111,W0613,E1101,R0913,R0914,W0223,E0203,W0201,C1801,E1129,C0325,R1705,W0622,W0703,W0101,W0122,R0912,R0903,W0221,W0212,R1703,R0201,W0603,R0915,R0902,C0200,W1201,C0302,W0511,R0911,E0401,R0205,R1714,R0801,E1130
+disable=C0103,W0613,E1101,R0913,R0914,W0223,E0203,W0201,C1801,E1129,C0325,R1705,W0622,W0703,W0101,W0122,R0912,R0903,W0221,W0212,R1703,R0201,W0603,R0915,R0902,C0200,W1201,C0302,W0511,R0911,E0401,R0205,R1714,R0801,E1130

[IMPORTS]
# Explicitly specifying this helps pylint to behave more consistently across multiple platforms.
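
For reference, the enforcement works at two levels: dropping C0111 from disable= above makes pylint require docstrings project-wide, while a file that is not ready can opt back out with the inline pragma this commit adds in several places. A minimal sketch of the opt-out (the module below is hypothetical):

# legacy_module.py -- hypothetical file that still lacks docstrings
# pylint: disable=missing-docstring
import math

def area(r):
  return math.pi * r ** 2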
@@ -4,6 +4,7 @@
References: https://openreview.net/forum?id=H1g0piA9tQ
https://arxiv.org/abs/1811.03685
"""
# pylint: disable=missing-docstring
import copy
import logging
import time
@@ -687,6 +688,9 @@ def print_progress(self, criteria, run_counts):
+ " times")

def filter(self, run_counts, criteria):
"""
Return run counts only for examples that are still correctly classified
"""
correctness = criteria['correctness']
assert correctness.dtype == np.bool
filtered_counts = deep_copy(run_counts)
@@ -793,6 +797,9 @@ def __init__(self, t=1., new_work_goal=None):
self.rng = np.random.RandomState([2018, 10, 7, 12])

def filter(self, run_counts, criteria):
"""
Return the counts for only those examples that are below the threshold
"""
wrong_confidence = criteria['wrong_confidence']
below_t = wrong_confidence <= self.t
filtered_counts = deep_copy(run_counts)
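
Both filter implementations follow the same pattern: build a boolean mask from one entry of criteria, deep-copy run_counts, and keep only the masked examples in each per-example count array. A minimal numpy sketch of that pattern (names are illustrative, not the attack_bundling API):

import numpy as np

def filter_counts(run_counts, keep_mask):
  """Keep per-example counts only where keep_mask is True."""
  return {config: counts[keep_mask] for config, counts in run_counts.items()}

run_counts = {'pgd': np.array([3, 1, 4, 1])}
correctness = np.array([True, False, True, True])
print(filter_counts(run_counts, correctness))  # {'pgd': array([3, 4, 1])}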
@@ -1,3 +1,6 @@
"""
The Attack class, providing a universal abstract interface describing attacks, and many implementations of it.
"""
from abc import ABCMeta
import collections
import warnings
@@ -6,6 +6,9 @@


class BasicIterativeMethod(ProjectedGradientDescent):
"""
The BasicIterativeMethod attack.
"""
def __init__(self, model, sess=None, dtypestr='float32', **kwargs):
super(BasicIterativeMethod, self).__init__(model, sess=sess,
dtypestr=dtypestr,
@@ -1,5 +1,6 @@
"""The CarliniWagnerL2 attack
"""
# pylint: disable=missing-docstring
import logging

import numpy as np
@@ -72,6 +72,7 @@ def generate(self, x, **kwargs):

# Define graph
def deepfool_wrap(x_val):
"""deepfool function for py_func"""
return deepfool_batch(self.sess, x, preds, logits, grads, x_val,
self.nb_candidate, self.overshoot,
self.max_iter, self.clip_min, self.clip_max,
@@ -1,5 +1,6 @@
"""The ElasticNetMethod attack.
"""
# pylint: disable=missing-docstring
import logging

import numpy as np
@@ -1,6 +1,7 @@
"""
The FastFeatureAdversaries attack
"""
# pylint: disable=missing-docstring
import warnings

import numpy as np
@@ -27,10 +27,6 @@ class LBFGS(Attack):
"""

def __init__(self, model, sess, dtypestr='float32', **kwargs):
"""
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
if not isinstance(model, Model):
wrapper_warning()
model = CallableModelWrapper(model, 'probs')
@@ -63,6 +59,9 @@ def generate(self, x, **kwargs):
self.clip_min, self.clip_max, nb_classes, self.batch_size)

def lbfgs_wrap(x_val, y_val):
"""
Wrapper creating TensorFlow interface for use with py_func
"""
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)

wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
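
lbfgs_wrap here, like deepfool_wrap above, uses the standard TF1 idiom for attacks implemented in numpy: tf.py_func lifts a Python function into the graph. A self-contained sketch of the idiom (the doubling function is a stand-in for the real attack):

import numpy as np
import tensorflow as tf  # TF1-era API, matching this codebase

def numpy_attack(x_val):
  """Stand-in for an attack implemented at the numpy level."""
  return np.array(2 * x_val, dtype=np.float32)

x = tf.placeholder(tf.float32, shape=[None, 2])
adv_x = tf.py_func(numpy_attack, [x], tf.float32)

with tf.Session() as sess:
  print(sess.run(adv_x, feed_dict={x: np.ones((1, 2), np.float32)}))  # [[2. 2.]]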
@@ -103,31 +102,31 @@ def parse_params(self,


class LBFGS_impl(object):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param sess: a TF session.
:param x: A tensor with the inputs.
:param logits: A tensor with model's output logits.
:param targeted_label: A tensor with the target labels.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the perturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: Minimum input component value
:param clip_max: Maximum input component value
:param num_labels: The number of classes in the model's output.
:param batch_size: Number of attacks to run simultaneously.
"""
def __init__(self, sess, x, logits, targeted_label,
binary_search_steps, max_iterations, initial_const, clip_min,
clip_max, nb_classes, batch_size):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param sess: a TF session.
:param x: A tensor with the inputs.
:param logits: A tensor with model's output logits.
:param targeted_label: A tensor with the target labels.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the perturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: Minimum input component value
:param clip_max: Maximum input component value
:param num_labels: The number of classes in the model's output.
:param batch_size: Number of attacks to run simultaneously.
"""
self.sess = sess
self.x = x
self.logits = logits
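
The binary_search_steps parameter described above follows the usual schedule for tuning a tradeoff constant: keep an upper and lower bound around the smallest constant that succeeds and bisect between them. A plain-Python sketch of that schedule (a simplification, not the LBFGS_impl internals):

def binary_search_const(attack_succeeds, initial_const, steps):
  """attack_succeeds(c) -> True if the attack works with tradeoff constant c."""
  lo, hi = 0.0, None  # bracket around the smallest working constant
  const, best = initial_const, None
  for _ in range(steps):
    if attack_succeeds(const):
      best = hi = const  # success: try a smaller constant
      const = (lo + hi) / 2
    else:
      lo = const  # failure: try a larger constant
      const = const * 10 if hi is None else (lo + hi) / 2
  return best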
@@ -161,7 +160,7 @@ def attack(self, x_val, targets):
"""

def lbfgs_objective(adv_x, self, targets, oimgs, CONST):
-    # returns the function value and the gradient for fmin_l_bfgs_b
+    """ returns the function value and the gradient for fmin_l_bfgs_b """
loss = self.sess.run(
self.loss,
feed_dict={
@@ -6,6 +6,9 @@


class MadryEtAl(ProjectedGradientDescent):
"""
The attack from Madry et al. 2017
"""
def __init__(self, model, sess=None, dtypestr='float32', **kwargs):
super(MadryEtAl, self).__init__(model, sess=sess,
dtypestr=dtypestr,
@@ -62,6 +62,11 @@ def parse_params(self, y=None, nb_classes=10, **kwargs):
return True

def attack(self, x, true_y):
"""
Runs the untargeted attack.
:param x: The input
:param true_y: The correct label for `x`. This attack aims to produce misclassification.
"""
adv_x_cls = []
prob_cls = []
m = tf.shape(x)[0]
@@ -105,5 +110,12 @@ def attack(self, x, true_y):
return out

def attack_class(self, x, target_y):
"""
Run the attack on a specific target class.
:param x: tf Tensor. The input example.
:param target_y: tf Tensor. The attacker's desired target class.
Returns:
A targeted adversarial example, intended to be classified as the target class.
"""
adv = self.base_attacker.generate(x, y_target=target_y, **self.params)
return adv
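
Together, attack and attack_class implement the MaxConfidence strategy: run the targeted base attack once per class, then keep, for each example, the candidate whose (wrong) target class received the highest probability. A numpy sketch of the final selection step (illustrative only; the true class is assumed to have been excluded already):

import numpy as np

def select_max_confidence(adv_per_class, probs_per_class):
  """adv_per_class: [nb_classes, m, ...] candidates; probs_per_class: [nb_classes, m]."""
  winners = np.argmax(probs_per_class, axis=0)  # most confident target class per example
  return adv_per_class[winners, np.arange(probs_per_class.shape[1])]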
@@ -73,9 +73,11 @@ def generate(self, x, **kwargs):
targeted = (self.y_target is not None)

def cond(i, _, __):
"""Iterate until number of iterations completed"""
return tf.less(i, self.nb_iter)

def body(i, ax, m):
"""Do a momentum step"""
logits = self.model.get_logits(ax)
loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
if targeted:
@@ -120,9 +120,11 @@ def generate(self, x, **kwargs):
dtypestr=self.dtypestr)

def cond(i, _):
"""Iterate until requested number of iterations is completed"""
return tf.less(i, self.nb_iter)

def body(i, adv_x):
"""Do a projected gradient step"""
adv_x = FGM.generate(adv_x, **fgm_params)

# Clipping perturbation eta to self.ord norm ball
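
These cond/body pairs are the standard tf.while_loop idiom: cond stops after nb_iter steps and body performs one attack step. A self-contained TF1 sketch of the same control flow (a toy halving update stands in for the FGM step):

import tensorflow as tf

nb_iter = 5

def cond(i, _):
  """Iterate until the requested number of iterations is completed."""
  return tf.less(i, nb_iter)

def body(i, x):
  """One toy update step, standing in for a gradient step."""
  return [i + 1, x / 2.0]

_, result = tf.while_loop(cond, body, [tf.constant(0), tf.constant([8.0])])
with tf.Session() as sess:
  print(sess.run(result))  # [0.25]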
@@ -1,5 +1,6 @@
"""The SalienceMapMethod attack
"""
# pylint: disable=missing-docstring
import warnings

import numpy as np
@@ -1,5 +1,6 @@
"""The SPSA attack
"""
# pylint: disable=missing-docstring
import warnings

import numpy as np
@@ -150,6 +151,9 @@ def generate(self,
num_iters=spsa_iters)

def loss_fn(x, label):
"""
Margin logit loss, with correct sign for targeted vs untargeted loss.
"""
logits = self.model.get_logits(x)
loss_multiplier = 1 if is_targeted else -1
return loss_multiplier * margin_logit_loss(
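
margin_logit_loss is the usual logit margin: the label's logit minus the largest other logit, and loss_multiplier flips the sign so that minimizing it helps a targeted attack and hurts an untargeted one. A numpy sketch of the margin itself (this mirrors, rather than calls, the SPSA helper):

import numpy as np

def margin(logits, label):
  """Per-example: logit of `label` minus the largest other logit."""
  idx = np.arange(len(logits))
  others = logits.copy()
  others[idx, label] = -np.inf
  return logits[idx, label] - others.max(axis=1)

print(margin(np.array([[2.0, 5.0, 1.0]]), np.array([1])))  # [3.]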
@@ -266,14 +270,36 @@ def _compute_gradients(self, loss_fn, x, unused_optim_state):
return tf.gradients(loss, x)

def _apply_gradients(self, grads, x, optim_state):
"""
Given a gradient, make one optimization step.
:param grads: list of tensors, same length as `x`, containing the corresponding gradients
:param x: list of tensors to update
:param optim_state: dict
Returns:
new_x: list of tensors, updated version of `x`
new_optim_state: dict, updated version of `optim_state`
"""
raise NotImplementedError(
"_apply_gradients should be defined in each subclass")

def minimize(self, loss_fn, x, optim_state):
"""
Analogous to tf.Optimizer.minimize
:param loss_fn: tf Tensor, representing the loss to minimize
:param x: list of Tensor, analogous to tf.Optimizer's var_list
:param optim_state: A possibly nested dict, containing any optimizer state.
Returns:
new_x: list of Tensor, updated version of `x`
new_optim_state: dict, updated version of `optim_state`
"""
grads = self._compute_gradients(loss_fn, x, optim_state)
return self._apply_gradients(grads, x, optim_state)

-  def init_optim_state(self, x):
+  def init_state(self, x):
"""Returns the initial state of the optimizer.
Args:
@@ -283,7 +309,7 @@ def init_optim_state(self, x):
A dictionary, representing the initial state of the optimizer.
"""
raise NotImplementedError(
"init_optim_state should be defined in each subclass")
"init_state should be defined in each subclass")


class TensorGradientDescent(TensorOptimizer):
@@ -312,6 +338,9 @@ def __init__(self, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-9):
self._epsilon = epsilon

def init_state(self, x):
"""
Initialize t, m, and u
"""
optim_state = {}
optim_state["t"] = 0.
optim_state["m"] = [tf.zeros_like(v) for v in x]
@@ -553,6 +582,13 @@ def _compute_xent(x):


def parallel_apply_transformations(x, transforms, black_border_size=0):
"""
Apply image transformations in parallel.
:param transforms: TODO
:param black_border_size: int, size of black border to apply
Returns:
Transformed images
"""
transforms = tf.convert_to_tensor(transforms, dtype=tf.float32)
x = _apply_black_border(x, black_border_size)
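
The "in parallel" part amounts to mapping one transform function over a batch of transform parameters at the graph level, e.g. with tf.map_fn. A hedged sketch of that shape (scalar shifts stand in for the real transform format, which is elided above):

import tensorflow as tf

def parallel_apply(x, params):
  """Apply one transform per parameter to the same input, in parallel."""
  return tf.map_fn(lambda p: x + p, params)  # toy transform: add a shift

out = parallel_apply(tf.constant([10.0]), tf.constant([0.0, 1.0, 2.0]))
with tf.Session() as sess:
  print(sess.run(out))  # [[10.] [11.] [12.]]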

@@ -1,3 +1,4 @@
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -1,3 +1,6 @@
"""
Canary code that dies if the underlying hardware / drivers aren't working right.
"""
import time

import numpy as np
@@ -17,6 +17,9 @@ def _wrap(f):
Wraps a callable `f` in a function that warns that the function is deprecated.
"""
def wrapper(*args, **kwargs):
"""
Issues a deprecation warning and passes through the arguments.
"""
warnings.warn(str(f) + " is deprecated. Switch to calling the equivalent function in tensorflow. "
" This function was originally needed as a compatibility layer for old versions of tensorflow, "
" but support for those versions has now been dropped.")
@@ -1,6 +1,7 @@
"""Dataset class for CleverHans
"""
# pylint: disable=missing-docstring

from __future__ import absolute_import
from __future__ import division
@@ -14,6 +14,7 @@
import numpy as np

class CleverHansTest(unittest.TestCase):
"""TestCase with some extra features"""

def setUp(self):
self.test_start = time.time()
@@ -24,6 +25,7 @@ def tearDown(self):
print(self.id(), "took", time.time() - self.test_start, "seconds")

def assertClose(self, x, y, *args, **kwargs):
"""Assert that `x` and `y` have close to the same value"""
# self.assertTrue(np.allclose(x, y)) doesn't give a useful message
# on failure
assert np.allclose(x, y, *args, **kwargs), (x, y)
@@ -21,6 +21,7 @@
all_py_files = list_files('.py')

def update_whitelist():
"""Add files to the whitelist"""
global whitelist_pep8
# We don't want to test RL-attack because it has so many dependencies
# not used elsewhere, and pylint wants to import them all
@@ -72,6 +72,7 @@


def main(_):
# pylint: disable=missing-docstring
tf.logging.set_verbosity(FLAGS.verbosity)

start_time = time.time()
@@ -1,4 +1,5 @@
"""Code with dual formulation for certification problem."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
