bug and consistency fixes for bb attack (#556)
Bug and consistency fixes for bb attack addressing #545 

Co-authored-by: Wieland Brendel <wieland.brendel@uni-tuebingen.de>
wielandbrendel and Wieland Brendel committed Jun 18, 2020
1 parent 680636c commit 36ff9e6
Showing 1 changed file with 3 additions and 5 deletions.
foolbox/attacks/brendel_bethge.py: 3 additions, 5 deletions
@@ -154,7 +154,7 @@ def _minimum_norm_to_boundary(self, x, b, _ell, _u, c, bnorm):
         """
         N = x.shape[0]

-        lambda_lower = 2 * c / bnorm ** 2
+        lambda_lower = 2 * c / (bnorm ** 2 + EPS)
         lambda_upper = (
             np.sign(c) * np.inf
         )  # optimal initial point (if box-constraints are neglected)
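The one-line fix above adds a small epsilon to the denominator so that a zero boundary-gradient norm (bnorm == 0) no longer produces a division by zero and an infinite or NaN lower bound for lambda. A minimal standalone sketch of the guarded computation, assuming a small module-level constant EPS (the exact value used in foolbox may differ):

import numpy as np

EPS = 1e-10  # assumed placeholder; foolbox defines its own module-level constant

def lambda_lower_bound(c: np.ndarray, bnorm: np.ndarray) -> np.ndarray:
    # Guarded form from the commit: stays finite even when bnorm is exactly zero.
    return 2 * c / (bnorm ** 2 + EPS)

# Unguarded, 2 * 0.7 / 0.0 would give inf (plus a RuntimeWarning);
# guarded, the result is large but finite.
print(lambda_lower_bound(np.array([0.7]), np.array([0.0])))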
@@ -320,7 +320,7 @@ class BrendelBethgeAttack(MinimizationAttack, ABC):
            decrease the step size in each iteration and ensure that the attack
            follows the boundary more faithfully.
        lr_decay : The trust region lr is multiplied with lr_decay in regular intervals (see
-           lr_reduction_interval).
+           lr_num_decay).
        lr_num_decay : Number of learning rate decays in regular intervals of
            length steps / lr_num_decay.
        momentum : Averaging of the boundary estimation over multiple steps. A momentum of
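The docstring fix above points readers to the parameter that actually exists (lr_num_decay) rather than the non-existent lr_reduction_interval. A hypothetical standalone sketch of the schedule those two parameters describe, decaying the learning rate lr_num_decay times over the run (names mirror the docstring; the exact update rule inside foolbox may differ):

steps, lr, lr_decay, lr_num_decay = 1000, 1e-3, 0.5, 20

interval = max(1, int(steps / lr_num_decay))  # length of one decay interval
for step in range(1, steps + 1):
    if step % interval == 0:
        lr *= lr_decay  # applied lr_num_decay times over the full run
print(lr)  # 1e-3 * 0.5 ** 20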
@@ -384,8 +384,6 @@ def run( # noqa: C901
        ----------
        inputs : Tensor that matches model type
            The original clean inputs.
-       labels : Integer tensor that matches model type
-           The reference labels for the inputs.
        criterion : Callable
            A callable that returns true if the given logits of perturbed
            inputs should be considered adversarial w.r.t. to the given labels
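For context, a typical criterion in foolbox 3 is Misclassification, which wraps the reference labels and marks an input as adversarial once the model no longer predicts its label. An illustrative call, not taken from this diff; the model and data setup are placeholders:

import foolbox as fb

model = ...           # e.g. an fb.PyTorchModel wrapping a trained classifier
images, labels = ...  # a batch of clean inputs and their reference labels

attack = fb.attacks.L2BrendelBethgeAttack()
criterion = fb.criteria.Misclassification(labels)
raw, clipped, is_adv = attack(model, images, criterion, epsilons=None)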
@@ -481,7 +479,7 @@ def logits_diff_and_grads(x) -> Tuple[Any, Any]:

        x = starting_points
        lrs = self.lr * np.ones(N)
-       lr_reduction_interval = min(1, int(self.steps / self.lr_num_decay))
+       lr_reduction_interval = max(1, int(self.steps / self.lr_num_decay))
        converged = np.zeros(N, dtype=np.bool)
        rate_normalization = np.prod(x.shape) * (max_ - min_)
        original_shape = x.shape
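The min to max swap above is the core bug fix: with min, the interval is at most 1 (and can even be 0), so the learning rate was decayed on every step and collapsed far too quickly; with max, the interval is steps / lr_num_decay as intended, clamped to at least 1 when lr_num_decay exceeds steps. A small sketch of the difference, using illustrative values not taken from the diff:

steps, lr_num_decay = 1000, 20

buggy = min(1, int(steps / lr_num_decay))  # 1 -> lr decays on every step
fixed = max(1, int(steps / lr_num_decay))  # 50 -> lr decays every 50 steps
print(buggy, fixed)  # 1 50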