diff --git a/.travis.yml b/.travis.yml index 6d8b3428b0..4cb12415df 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,7 +37,6 @@ install: - conda install libgcc - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/travis/miniconda/envs/test-environment/lib - export PYTHONPATH=".":$PYTHONPATH - - pip freeze script: - - python -m unittest discover + - ./run_tests.sh diff --git a/art/attacks/adversarial_patch.py b/art/attacks/adversarial_patch.py index d114751c4e..2343f0de7d 100644 --- a/art/attacks/adversarial_patch.py +++ b/art/attacks/adversarial_patch.py @@ -85,7 +85,7 @@ def generate(self, x, y=None): """ Generate adversarial samples and return them in an array. - :param x: An array with the original inputs. + :param x: An array with the original inputs. `x` is expected to have spatial dimensions. :type x: `np.ndarray` :param y: An array with the original labels to be predicted. :type y: `np.ndarray` @@ -93,6 +93,11 @@ def generate(self, x, y=None): :rtype: `np.ndarray` """ logger.info('Creating adversarial patch.') + + if len(x.shape) == 2: + raise ValueError('Feature vectors detected. The adversarial patch can only be applied to data with spatial ' + 'dimensions.') + self.patch = (np.random.standard_normal(size=self.patch_shape)) * 20.0 for i_step in range(self.max_iter): diff --git a/art/attacks/boundary.py b/art/attacks/boundary.py index b33677d744..c3d4a0df04 100644 --- a/art/attacks/boundary.py +++ b/art/attacks/boundary.py @@ -21,6 +21,7 @@ import numpy as np +from art import NUMPY_DTYPE from art.attacks.attack import Attack logger = logging.getLogger(__name__) @@ -28,8 +29,8 @@ class BoundaryAttack(Attack): """ - Implementation of the boundary attack from Wieland Brendel et al. (2018). - Paper link: https://arxiv.org/abs/1712.04248 + Implementation of the boundary attack from Wieland Brendel et al. (2018). This is a powerful black-box attack that + only requires final class prediction. Paper link: https://arxiv.org/abs/1712.04248 """ attack_params = Attack.attack_params + ['targeted', 'delta', 'epsilon', 'step_adapt', 'max_iter', 'sample_size', 'init_size'] @@ -77,42 +78,38 @@ def generate(self, x, y=None): :return: An array holding the adversarial examples. :rtype: `np.ndarray` """ - # Prediction from the original images - preds = np.argmax(self.classifier.predict(x), axis=1) - # Assert that, if attack is targeted, y is provided if self.targeted and y is None: raise ValueError('Target labels `y` need to be provided for a targeted attack.') # Some initial setups - x_adv = x.copy() + x_adv = x.astype(NUMPY_DTYPE) if y is not None: y = np.argmax(y, axis=1) + preds = np.argmax(self.classifier.predict(x), axis=1) # Generate the adversarial samples for ind, val in enumerate(x_adv): if self.targeted: - x_ = self._perturb(x=val, y=y[ind], y_p=preds[ind]) + x_adv[ind] = self._perturb(x=val, y_p=preds[ind], y=y[ind]) else: - x_ = self._perturb(x=val, y=None, y_p=preds[ind]) - - x_adv[ind] = x_ + x_adv[ind] = self._perturb(x=val, y_p=preds[ind]) logger.info('Success rate of Boundary attack: %.2f%%', (np.sum(preds != np.argmax(self.classifier.predict(x_adv), axis=1)) / x.shape[0])) return x_adv - def _perturb(self, x, y, y_p): + def _perturb(self, x, y_p, y=None): """ Internal attack function for one example. :param x: An array with one original input to be attacked. :type x: `np.ndarray` - :param y: If `self.targeted` is true, then `y` represents the target label. - :type y: `int` :param y_p: The predicted label of x. 
:type y_p: `int` + :param y: If `self.targeted` is true, then `y` represents the target label. + :type y: `int` :return: an adversarial example. """ # First, create an initial adversarial sample @@ -148,11 +145,13 @@ def _attack(self, initial_sample, original_sample, target, initial_delta, initia :return: an adversarial example. :rtype: `np.ndarray` """ + def compare(object1, object2): + return object1 == object2 if self.targeted else object1 != object2 + # Get initialization for some variables x_adv = initial_sample delta = initial_delta epsilon = initial_epsilon - clip_min, clip_max = self.classifier.clip_values # Main loop to wander around the boundary for _ in range(self.max_iter): @@ -161,22 +160,15 @@ def _attack(self, initial_sample, original_sample, target, initial_delta, initia potential_advs = [] for _ in range(self.sample_size): potential_adv = x_adv + self._orthogonal_perturb(delta, x_adv, original_sample) - potential_adv = np.clip(potential_adv, clip_min, clip_max) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + np.clip(potential_adv, self.classifier.clip_values[0], self.classifier.clip_values[1], + out=potential_adv) potential_advs.append(potential_adv) preds = np.argmax(self.classifier.predict(np.array(potential_advs)), axis=1) - - if self.targeted: - satisfied = (preds == target) - else: - satisfied = (preds != target) - + satisfied = compare(preds, target) delta_ratio = np.mean(satisfied) - - if delta_ratio < 0.5: - delta *= self.step_adapt - else: - delta /= self.step_adapt + delta = delta * self.step_adapt if delta_ratio < .5 else delta / self.step_adapt if delta_ratio > 0: x_adv = potential_advs[np.where(satisfied)[0][0]] @@ -191,15 +183,13 @@ def _attack(self, initial_sample, original_sample, target, initial_delta, initia perturb = original_sample - x_adv perturb *= epsilon potential_adv = x_adv + perturb - potential_adv = np.clip(potential_adv, clip_min, clip_max) - pred = np.argmax(self.classifier.predict(np.array([potential_adv])), axis=1)[0] + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + np.clip(potential_adv, self.classifier.clip_values[0], self.classifier.clip_values[1], + out=potential_adv) - if self.targeted: - satisfied = (pred == target) - else: - satisfied = (pred != target) + pred = np.argmax(self.classifier.predict(np.array([potential_adv])), axis=1)[0] - if satisfied: + if compare(pred, target): x_adv = potential_adv epsilon /= self.step_adapt break @@ -225,7 +215,7 @@ def _orthogonal_perturb(self, delta, current_sample, original_sample): :return: a possible perturbation. """ # Generate perturbation randomly - perturb = np.random.randn(current_sample.shape[0], current_sample.shape[1], current_sample.shape[2]) + perturb = np.random.randn(*self.classifier.input_shape) # Rescale the perturbation perturb /= np.linalg.norm(perturb) @@ -254,7 +244,7 @@ def _orthogonal_perturb(self, delta, current_sample, original_sample): return perturb - def _init_sample(self, x, y, y_p): + def _init_sample(self, x, y, y_pred): """ Find initial adversarial example for the attack. @@ -262,44 +252,32 @@ def _init_sample(self, x, y, y_p): :type x: `np.ndarray` :param y: If `self.targeted` is true, then `y` represents the target label. :type y: `int` - :param y_p: The predicted label of x. - :type y_p: `int` + :param y_pred: The predicted label of x. + :type y_pred: `int` :return: an adversarial example. 
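        Illustrative sketch of the new `compare` helper in `_attack` (example values
        assumed): for candidate predictions `preds = np.array([3, 7, 7, 1])` and a
        target class of 7,

            satisfied = compare(preds, 7)
            # targeted:   [False,  True,  True, False]
            # untargeted: [ True, False, False,  True]
            # delta_ratio = np.mean(satisfied); delta is multiplied by step_adapt when
            # fewer than half of the candidates succeed, and divided by it otherwise.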
""" - clip_min, clip_max = self.classifier.clip_values nprd = np.random.RandomState() initial_sample = None - if self.targeted: - # Attack satisfied - if y == y_p: - return None - - # Attack unsatisfied yet - for _ in range(self.init_size): - random_img = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype) - random_class = np.argmax(self.classifier.predict(np.array([random_img])), axis=1)[0] - - if random_class == y: - initial_sample = random_img + # Attack satisfied + if self.targeted and y == y_pred: + return None - logging.info('Found initial adversarial image for targeted attack.') - break - else: - logging.warning('Failed to draw a random image that is adversarial, attack failed.') - - else: - for _ in range(self.init_size): - random_img = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype) - random_class = np.argmax(self.classifier.predict(np.array([random_img])), axis=1)[0] - - if random_class != y_p: - initial_sample = random_img - - logging.info('Found initial adversarial image for untargeted attack.') - break + # Attack unsatisfied yet + for _ in range(self.init_size): + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + random_sample = nprd.uniform(self.classifier.clip_values[0], self.classifier.clip_values[1], + size=x.shape).astype(x.dtype) else: - logging.warning('Failed to draw a random image that is adversarial, attack failed.') + # TODO Adjust following feature-wise and for entire sample provided by user? + mean_, std_ = np.mean(x), np.std(x) + random_sample = nprd.normal(loc=mean_, scale=2 * std_, size=x.shape).astype(x.dtype) + random_class = np.argmax(self.classifier.predict(np.array([random_sample])), axis=1)[0] + + if (self.targeted and random_class == y) or (not self.targeted and random_class != y_pred): + initial_sample = random_sample + logging.info('Found initial adversarial image for attack.') + break return initial_sample diff --git a/art/attacks/carlini.py b/art/attacks/carlini.py index bcff57fa79..51ebc2a9c5 100644 --- a/art/attacks/carlini.py +++ b/art/attacks/carlini.py @@ -127,7 +127,7 @@ def _loss(self, x, x_adv, target, c): return z, l2dist, c*loss + l2dist - def _gradient_of_loss(self, z, target, x, x_adv, x_adv_tanh, c, clip_min, clip_max): + def _loss_gradient(self, z, target, x, x_adv, x_adv_tanh, c, clip_min, clip_max): """ Compute the gradient of the loss function. @@ -185,7 +185,10 @@ def generate(self, x, y=None): :rtype: `np.ndarray` """ x_adv = x.astype(NUMPY_DTYPE) - (clip_min, clip_max) = self.classifier.clip_values + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + else: + clip_min, clip_max = np.amin(x), np.amax(x) # Assert that, if attack is targeted, y_val is provided: if self.targeted and y is None: @@ -204,8 +207,7 @@ def generate(self, x, y=None): x_batch = x_adv[batch_index_1:batch_index_2] y_batch = y[batch_index_1:batch_index_2] - # The optimization is performed in tanh space to keep the - # adversarial images bounded from clip_min and clip_max. 
+ # The optimization is performed in tanh space to keep the adversarial images bounded in correct range x_batch_tanh = original_to_tanh(x_batch, clip_min, clip_max, self._tanh_smoother) # Initialize binary search: @@ -256,9 +258,9 @@ def generate(self, x, y=None): # compute gradient: logger.debug('Compute loss gradient') - perturbation_tanh = -self._gradient_of_loss(z[active], y_batch[active], x_batch[active], - x_adv_batch[active], x_adv_batch_tanh[active], - c[active], clip_min, clip_max) + perturbation_tanh = -self._loss_gradient(z[active], y_batch[active], x_batch[active], + x_adv_batch[active], x_adv_batch_tanh[active], + c[active], clip_min, clip_max) # perform line search to optimize perturbation # first, halve the learning rate until perturbation actually decreases the loss: @@ -496,7 +498,7 @@ def _loss(self, x_adv, target): return z, loss - def _gradient_of_loss(self, z, target, x_adv, x_adv_tanh, clip_min, clip_max): + def _loss_gradient(self, z, target, x_adv, x_adv_tanh, clip_min, clip_max): """ Compute the gradient of the loss function. @@ -545,6 +547,11 @@ def generate(self, x, y=None): """ x_adv = x.astype(NUMPY_DTYPE) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min_per_pixel, clip_max_per_pixel = self.classifier.clip_values + else: + clip_min_per_pixel, clip_max_per_pixel = np.amin(x), np.amax(x) + # Assert that, if attack is targeted, y_val is provided: if self.targeted and y is None: raise ValueError('Target labels `y` need to be provided for a targeted attack.') @@ -562,7 +569,7 @@ def generate(self, x, y=None): x_batch = x_adv[batch_index_1:batch_index_2] y_batch = y[batch_index_1:batch_index_2] - (clip_min_per_pixel, clip_max_per_pixel) = self.classifier.clip_values + # Determine values for later clipping clip_min = np.clip(x_batch - self.eps, clip_min_per_pixel, clip_max_per_pixel) clip_max = np.clip(x_batch + self.eps, clip_min_per_pixel, clip_max_per_pixel) @@ -592,8 +599,8 @@ def generate(self, x, y=None): # compute gradient: logger.debug('Compute loss gradient') - perturbation_tanh = -self._gradient_of_loss(z[active], y_batch[active], x_adv_batch[active], - x_adv_batch_tanh[active], clip_min[active], clip_max[active]) + perturbation_tanh = -self._loss_gradient(z[active], y_batch[active], x_adv_batch[active], + x_adv_batch_tanh[active], clip_min[active], clip_max[active]) # perform line search to optimize perturbation # first, halve the learning rate until perturbation actually decreases the loss: diff --git a/art/attacks/deepfool.py b/art/attacks/deepfool.py index eccb179cb9..fa0c4d673e 100644 --- a/art/attacks/deepfool.py +++ b/art/attacks/deepfool.py @@ -21,6 +21,7 @@ import numpy as np +from art import NUMPY_DTYPE from art.attacks.attack import Attack logger = logging.getLogger(__name__) @@ -64,8 +65,7 @@ def generate(self, x, y=None): :return: An array holding the adversarial examples. 
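        Background sketch of the tanh reparameterisation mentioned in the Carlini & Wagner
        comment above (a generic change of variables, not necessarily the exact helper
        implementation):

            w = np.arctanh(2.0 * (x - clip_min) / (clip_max - clip_min) - 1.0)
            x_back = clip_min + (clip_max - clip_min) * (np.tanh(w) + 1.0) / 2.0

        Because tanh maps back into (-1, 1), any unconstrained update of `w` keeps the
        reconstructed sample strictly inside (clip_min, clip_max); the `_tanh_smoother`
        factor only avoids evaluating arctanh at exactly +/-1 on the bounds.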
:rtype: `np.ndarray` """ - clip_min, clip_max = self.classifier.clip_values - x_adv = x.copy() + x_adv = x.astype(NUMPY_DTYPE) preds = self.classifier.predict(x, logits=True) # Determine the class labels for which to compute the gradients @@ -112,11 +112,16 @@ def generate(self, x, y=None): value[np.arange(len(value)), labels_indices] = np.inf l = np.argmin(value, axis=1) r = (abs(f_diff[np.arange(len(f_diff)), l]) / (pow(np.linalg.norm(grad_diff[np.arange(len( - grad_diff)), l].reshape(len(grad_diff), -1), axis=1), 2) + tol))[:, None, None, None] * \ - grad_diff[np.arange(len(grad_diff)), l] + grad_diff)), l].reshape(len(grad_diff), -1), axis=1), 2) + tol)) + r = r.reshape((-1,) + (1,) * (len(x.shape) - 1)) + r = r * grad_diff[np.arange(len(grad_diff)), l] # Add perturbation and clip result - batch[active_indices] = np.clip(batch[active_indices] + r[active_indices], clip_min, clip_max) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + batch[active_indices] = np.clip(batch[active_indices] + r[active_indices], + self.classifier.clip_values[0], self.classifier.clip_values[1]) + else: + batch[active_indices] += r[active_indices] # Recompute prediction for new x f = self.classifier.predict(batch, logits=True) @@ -137,8 +142,11 @@ def generate(self, x, y=None): current_step += 1 # Apply overshoot parameter - x_adv[batch_index_1:batch_index_2] = np.clip(x_adv[batch_index_1:batch_index_2] + ( - 1 + self.epsilon) * (batch - x_adv[batch_index_1:batch_index_2]), clip_min, clip_max) + x_adv[batch_index_1:batch_index_2] = x_adv[batch_index_1:batch_index_2] + \ + (1 + self.epsilon) * (batch - x_adv[batch_index_1:batch_index_2]) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + np.clip(x_adv[batch_index_1:batch_index_2], self.classifier.clip_values[0], + self.classifier.clip_values[1], out=x_adv[batch_index_1:batch_index_2]) logger.info('Success rate of DeepFool attack: %.2f%%', (np.sum(np.argmax(preds, axis=1) != np.argmax(self.classifier.predict(x_adv), axis=1)) / diff --git a/art/attacks/elastic_net.py b/art/attacks/elastic_net.py index 111c94f2a6..e693950f7d 100644 --- a/art/attacks/elastic_net.py +++ b/art/attacks/elastic_net.py @@ -167,7 +167,6 @@ def generate(self, x, y=None): :rtype: `np.ndarray` """ x_adv = x.astype(NUMPY_DTYPE) - (clip_min, clip_max) = self.classifier.clip_values # Assert that, if attack is targeted, y is provided: if self.targeted and y is None: @@ -188,7 +187,8 @@ def generate(self, x, y=None): x_adv[batch_index_1:batch_index_2] = self._generate_batch(x_batch, y_batch) # Apply clip - x_adv = np.clip(x_adv, clip_min, clip_max) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + x_adv = np.clip(x_adv, self.classifier.clip_values[0], self.classifier.clip_values[1]) # Compute success rate of the EAD attack logger.info('Success rate of EAD attack: %.2f%%', diff --git a/art/attacks/fast_gradient.py b/art/attacks/fast_gradient.py index 146425823d..dd2ce28502 100644 --- a/art/attacks/fast_gradient.py +++ b/art/attacks/fast_gradient.py @@ -21,6 +21,7 @@ import numpy as np +from art import NUMPY_DTYPE from art.attacks.attack import Attack from art.utils import compute_success, get_labels_np_array, random_sphere @@ -209,19 +210,25 @@ def _compute_perturbation(self, batch, batch_labels): return grad def _apply_perturbation(self, batch, perturbation, eps_step): - clip_min, clip_max = self.classifier.clip_values - return np.clip(batch + eps_step * perturbation, 
clip_min, clip_max) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + batch = np.clip(batch + eps_step * perturbation, clip_min, clip_max) + else: + batch = batch + eps_step * perturbation + + return batch def _compute(self, x, y, eps, eps_step, random_init): if random_init: n = x.shape[0] m = np.prod(x.shape[1:]) - adv_x = x.copy() + random_sphere(n, m, eps, self.norm).reshape(x.shape) + adv_x = x.astype(NUMPY_DTYPE) + random_sphere(n, m, eps, self.norm).reshape(x.shape) - clip_min, clip_max = self.classifier.clip_values - adv_x = np.clip(adv_x, clip_min, clip_max) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + adv_x = np.clip(adv_x, clip_min, clip_max) else: - adv_x = x.copy() + adv_x = x.astype(NUMPY_DTYPE) # Compute perturbation with implicit batching for batch_id in range(int(np.ceil(adv_x.shape[0] / float(self.batch_size)))): diff --git a/art/attacks/iterative_method.py b/art/attacks/iterative_method.py index cef8c05154..e3a0e042ce 100644 --- a/art/attacks/iterative_method.py +++ b/art/attacks/iterative_method.py @@ -21,6 +21,7 @@ import numpy as np +from art import NUMPY_DTYPE from art.attacks import FastGradientMethod from art.utils import compute_success, get_labels_np_array, projection @@ -98,7 +99,7 @@ def generate(self, x, y=None): rate_best = 0.0 for i_random_init in range(max(1, self.num_random_init)): - adv_x = x.copy() + adv_x = x.astype(NUMPY_DTYPE) for i_max_iter in range(self.max_iter): diff --git a/art/attacks/newtonfool.py b/art/attacks/newtonfool.py index 840ebb2284..beda148d18 100644 --- a/art/attacks/newtonfool.py +++ b/art/attacks/newtonfool.py @@ -21,6 +21,7 @@ import numpy as np +from art import NUMPY_DTYPE from art.attacks.attack import Attack from art.utils import to_categorical @@ -61,10 +62,9 @@ def generate(self, x, y=None): :return: An array holding the adversarial examples. 
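        Sketch of the rank-agnostic broadcasting used in the DeepFool change above and the
        NewtonFool change below: the former hard-coded `[:, None, None, None]` only worked
        for 4-D image batches, whereas

            r = r.reshape((-1,) + (1,) * (len(x.shape) - 1))

        appends one singleton axis per non-batch dimension, so the per-sample scalars
        broadcast against inputs of any rank, including plain feature vectors.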
:rtype: `np.ndarray` """ - x_adv = x.copy() + x_adv = x.astype(NUMPY_DTYPE) # Initialize variables - clip_min, clip_max = self.classifier.clip_values y_pred = self.classifier.predict(x, logits=False) pred_class = np.argmax(y_pred, axis=1) @@ -98,7 +98,11 @@ def generate(self, x, y=None): batch += di_batch # Apply clip - x_adv[batch_index_1:batch_index_2] = np.clip(batch, clip_min, clip_max) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + x_adv[batch_index_1:batch_index_2] = np.clip(batch, clip_min, clip_max) + else: + x_adv[batch_index_1:batch_index_2] = batch logger.info('Success rate of NewtonFool attack: %.2f%%', (np.sum(np.argmax(self.classifier.predict(x), axis=1) != @@ -165,9 +169,10 @@ def _compute_pert(theta, grads, norm_grad): """ # Pick a small scalar to avoid division by 0 tol = 10e-8 - nom = -theta[:, None, None, None] * grads + + nom = -theta.reshape((-1,) + (1,) * (len(grads.shape) - 1)) * grads denom = norm_grad**2 denom[denom < tol] = tol - result = nom / denom[:, None, None, None] + result = nom / denom.reshape((-1,) + (1,) * (len(grads.shape) - 1)) return result diff --git a/art/attacks/saliency_map.py b/art/attacks/saliency_map.py index fa47b8b1c1..c48b70f03f 100644 --- a/art/attacks/saliency_map.py +++ b/art/attacks/saliency_map.py @@ -21,6 +21,7 @@ import numpy as np +from art import NUMPY_DTYPE from art.attacks.attack import Attack logger = logging.getLogger(__name__) @@ -61,13 +62,10 @@ def generate(self, x, y=None): :return: An array holding the adversarial examples. :rtype: `np.ndarray` """ - # Parse and save attack-specific parameters - clip_min, clip_max = self.classifier.clip_values - # Initialize variables dims = list(x.shape[1:]) self._nb_features = np.product(dims) - x_adv = np.reshape(np.copy(x), (-1, self._nb_features)) + x_adv = np.reshape(x.astype(NUMPY_DTYPE), (-1, self._nb_features)) preds = np.argmax(self.classifier.predict(x), axis=1) # Determine target classes for attack @@ -86,10 +84,12 @@ def generate(self, x, y=None): # Main algorithm for each batch # Initialize the search space; optimize to remove features that can't be changed search_space = np.zeros_like(batch) - if self.theta > 0: - search_space[batch < clip_max] = 1 - else: - search_space[batch > clip_min] = 1 + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + if self.theta > 0: + search_space[batch < clip_max] = 1 + else: + search_space[batch > clip_min] = 1 # Get current predictions current_pred = preds[batch_index_1:batch_index_2] @@ -106,22 +106,31 @@ def generate(self, x, y=None): all_feat[active_indices][np.arange(len(active_indices)), feat_ind[:, 0]] = 1 all_feat[active_indices][np.arange(len(active_indices)), feat_ind[:, 1]] = 1 - # Prepare update depending of theta - if self.theta > 0: - clip_func, clip_value = np.minimum, clip_max + # Apply attack with clipping + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + # Prepare update depending of theta + if self.theta > 0: + clip_func, clip_value = np.minimum, clip_max + else: + clip_func, clip_value = np.maximum, clip_min + + # Update adversarial examples + tmp_batch = batch[active_indices] + tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] = \ + clip_func(clip_value, tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] + self.theta) + tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] = \ + 
clip_func(clip_value, tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] + self.theta) + batch[active_indices] = tmp_batch + + # Remove indices from search space if max/min values were reached + search_space[batch == clip_value] = 0 + + # Apply attack without clipping else: - clip_func, clip_value = np.maximum, clip_min - - # Update adversarial examples - tmp_batch = batch[active_indices] - tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] = clip_func(clip_value, - tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] + self.theta) - tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] = clip_func(clip_value, - tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] + self.theta) - batch[active_indices] = tmp_batch - - # Remove indices from search space if max/min values were reached - search_space[batch == clip_value] = 0 + tmp_batch = batch[active_indices] + tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] += self.theta + tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] += self.theta + batch[active_indices] = tmp_batch # Recompute model prediction current_pred = np.argmax(self.classifier.predict(np.reshape(batch, [batch.shape[0]] + dims)), axis=1) diff --git a/art/attacks/spatial_transformation.py b/art/attacks/spatial_transformation.py index 6b7d2e0ed6..da09a48221 100644 --- a/art/attacks/spatial_transformation.py +++ b/art/attacks/spatial_transformation.py @@ -78,6 +78,10 @@ def generate(self, x, y=None): """ logger.info('Computing spatial transformation based on grid search.') + if len(x.shape) == 2: + raise ValueError('Feature vectors detected. The attack can only be applied to data with spatial' + 'dimensions.') + if self.attack_trans_x is None or self.attack_trans_y is None or self.attack_rot is None: y_pred = self.classifier.predict(x, logits=False) @@ -154,7 +158,9 @@ def _perturb(self, x, trans_x, trans_y, rot): else: raise ValueError("Unsupported channel index.") - x_adv = np.clip(x_adv, self.classifier.clip_values[0], self.classifier.clip_values[1]) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + np.clip(x_adv, self.classifier.clip_values[0], self.classifier.clip_values[1], out=x_adv) + return x_adv def set_params(self, **kwargs): diff --git a/art/attacks/universal_perturbation.py b/art/attacks/universal_perturbation.py index 3894c6f760..c0f9a4cc17 100644 --- a/art/attacks/universal_perturbation.py +++ b/art/attacks/universal_perturbation.py @@ -89,7 +89,7 @@ def generate(self, x, y=None): logger.info('Computing universal perturbation based on %s attack.', self.attacker) # Init universal perturbation - v = 0 + noise = 0 fooling_rate = 0.0 nb_instances = len(x) @@ -108,33 +108,38 @@ def generate(self, x, y=None): for j, ex in enumerate(x[rnd_idx]): xi = ex[None, ...] 
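                # (Illustrative summary, using the renamed variable: `noise` is the single
                #  universal perturbation shared by all samples. Whenever the per-sample
                #  attacker changes the prediction of `xi + noise`, the difference
                #  `adv_xi - xi` becomes the new noise and is projected back onto the L_p
                #  ball of radius `eps` via `projection(noise, self.eps, self.norm)`.)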
- current_label = np.argmax(self.classifier.predict(xi + v, logits=True)[0]) + current_label = np.argmax(self.classifier.predict(xi + noise, logits=True)[0]) original_label = np.argmax(pred_y[rnd_idx][j]) if current_label == original_label: # Compute adversarial perturbation - adv_xi = attacker.generate(xi + v) + adv_xi = attacker.generate(xi + noise) new_label = np.argmax(self.classifier.predict(adv_xi, logits=True)[0]) # If the class has changed, update v if current_label != new_label: - v = adv_xi - xi + noise = adv_xi - xi # Project on L_p ball - v = projection(v, self.eps, self.norm) + noise = projection(noise, self.eps, self.norm) nb_iter += 1 + # Apply attack and clip + x_adv = x + noise + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + x_adv = np.clip(x_adv, clip_min, clip_max) + # Compute the error rate - adv_x = x + v - adv_y = np.argmax(self.classifier.predict(adv_x, logits=False), axis=1) - fooling_rate = np.sum(pred_y_max != adv_y) / nb_instances + y_adv = np.argmax(self.classifier.predict(x_adv, logits=False), axis=1) + fooling_rate = np.sum(pred_y_max != y_adv) / nb_instances self.fooling_rate = fooling_rate - self.converged = (nb_iter < self.max_iter) - self.v = v + self.converged = nb_iter < self.max_iter + self.noise = noise logger.info('Success rate of universal perturbation attack: %.2f%%', fooling_rate) - return adv_x + return x_adv def set_params(self, **kwargs): """ diff --git a/art/attacks/virtual_adversarial.py b/art/attacks/virtual_adversarial.py index 16719cdd6e..8e83a0c356 100644 --- a/art/attacks/virtual_adversarial.py +++ b/art/attacks/virtual_adversarial.py @@ -21,6 +21,7 @@ import numpy as np +from art import NUMPY_DTYPE from art.attacks.attack import Attack logger = logging.getLogger(__name__) @@ -45,7 +46,7 @@ def __init__(self, classifier, max_iter=1, finite_diff=1e-6, eps=.1, batch_size= :type finite_diff: `float` :param max_iter: The maximum number of iterations. :type max_iter: `int` - :param batch_size: Batch size + :param batch_size: Batch size. :type batch_size: `int` """ super(VirtualAdversarialMethod, self).__init__(classifier) @@ -66,8 +67,7 @@ def generate(self, x, y=None): :return: An array holding the adversarial examples. 
:rtype: `np.ndarray` """ - clip_min, clip_max = self.classifier.clip_values - x_adv = np.copy(x) + x_adv = x.astype(NUMPY_DTYPE) preds = self.classifier.predict(x_adv, logits=False) # Pick a small scalar to avoid division by 0 @@ -76,7 +76,7 @@ def generate(self, x, y=None): # Compute perturbation with implicit batching for batch_id in range(int(np.ceil(x_adv.shape[0] / float(self.batch_size)))): batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size - batch = x_adv[batch_index_1:batch_index_2] + batch = x_adv[batch_index_1:batch_index_2].reshape((x_adv.shape[0], -1)) # Main algorithm for each batch d = np.random.randn(*batch.shape) @@ -84,24 +84,31 @@ def generate(self, x, y=None): # Main loop of the algorithm for _ in range(self.max_iter): d = self._normalize(d) - preds_new = self.classifier.predict(batch + d, logits=False) + preds_new = self.classifier.predict((batch + d).reshape((-1,) + self.classifier.input_shape), + logits=False) from scipy.stats import entropy kl_div1 = entropy(np.transpose(preds[batch_index_1:batch_index_2]), np.transpose(preds_new)) d_new = np.zeros_like(d) - for w in range(d.shape[1]): - for h in range(d.shape[2]): - for c in range(d.shape[3]): - d[:, w, h, c] += self.finite_diff - preds_new = self.classifier.predict(batch + d, logits=False) - kl_div2 = entropy(np.transpose(preds[batch_index_1:batch_index_2]), np.transpose(preds_new)) - d_new[:, w, h, c] = (kl_div2 - kl_div1) / (self.finite_diff + tol) - d[:, w, h, c] -= self.finite_diff + for current_index in range(d.shape[1]): + d[:, current_index] += self.finite_diff + preds_new = self.classifier.predict((batch + d).reshape((-1,) + self.classifier.input_shape), + logits=False) + kl_div2 = entropy(np.transpose(preds[batch_index_1:batch_index_2]), np.transpose(preds_new)) + d_new[:, current_index] = (kl_div2 - kl_div1) / (self.finite_diff + tol) + d[:, current_index] -= self.finite_diff d = d_new # Apply perturbation and clip - x_adv[batch_index_1:batch_index_2] = np.clip(batch + self.eps * self._normalize(d), clip_min, clip_max) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + x_adv[batch_index_1:batch_index_2] = \ + np.clip(batch + self.eps * self._normalize(d), clip_min, clip_max) \ + .reshape((-1,) + self.classifier.input_shape) + else: + x_adv[batch_index_1:batch_index_2] = (batch + self.eps * self._normalize(d)) \ + .reshape((-1,) + self.classifier.input_shape) return x_adv @@ -116,12 +123,9 @@ def _normalize(x): :rtype: `np.ndarray` """ tol = 1e-10 - dims = x.shape - x = x.reshape(dims[0], -1) inverse = (np.sum(x**2, axis=1) + tol) ** -.5 x = x * inverse[:, None] - x = np.reshape(x, dims) return x diff --git a/art/attacks/zoo.py b/art/attacks/zoo.py index 2a78404a7d..61a57a470f 100644 --- a/art/attacks/zoo.py +++ b/art/attacks/zoo.py @@ -80,6 +80,10 @@ def __init__(self, classifier, confidence=0.0, targeted=False, learning_rate=1e- """ super(ZooAttack, self).__init__(classifier) + if len(classifier.input_shape) == 1: + raise ValueError('Feature vectors detected. The ZOO attack can only be applied to data with spatial' + 'dimensions.') + kwargs = { 'confidence': confidence, 'targeted': targeted, @@ -155,6 +159,11 @@ def generate(self, x, y=None): :return: An array holding the adversarial examples. :rtype: `np.ndarray` """ + # ZOO can probably be extended to feature vectors if no zooming or resizing is applied + if len(x.shape) == 2: + raise ValueError('Feature vectors detected. 
The ZOO attack can only be applied to data with spatial' + 'dimensions.') + # Check that `y` is provided for targeted attacks if self.targeted and y is None: raise ValueError('Target labels `y` need to be provided for a targeted attack.') @@ -174,10 +183,12 @@ def generate(self, x, y=None): y_batch = y[batch_index_1:batch_index_2] res = self._generate_batch(x_batch, y_batch) x_adv.append(res) + x_adv = np.vstack(x_adv) # Apply clip - x_adv = np.vstack(x_adv) - x_adv = np.clip(x_adv, self.classifier.clip_values[0], self.classifier.clip_values[1]) + if hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + np.clip(x_adv, clip_min, clip_max, out=x_adv) # Log success rate of the ZOO attack logger.info('Success rate of ZOO attack: %.2f%%', @@ -397,8 +408,9 @@ def _optimizer_adam_coordinate(self, losses, index, mean, var, current_noise, le current_noise[index] -= learning_rate * corr * mean[index] / (np.sqrt(var[index]) + 1e-8) adam_epochs[index] += 1 - if proj: - np.clip(current_noise[index], self.classifier.clip_values[0], self.classifier.clip_values[1]) + if proj and hasattr(self.classifier, 'clip_values') and self.classifier.clip_values is not None: + clip_min, clip_max = self.classifier.clip_values + current_noise[index] = np.clip(current_noise[index], clip_min, clip_max) return current_noise.reshape(orig_shape) diff --git a/art/classifiers/classifier.py b/art/classifiers/classifier.py index 50795d6aad..b7ecfa61b7 100644 --- a/art/classifiers/classifier.py +++ b/art/classifiers/classifier.py @@ -20,6 +20,8 @@ import abc import sys +import numpy as np + # Ensure compatibility with Python 2 and 3 when using ABCMeta if sys.version_info >= (3, 4): ABC = abc.ABC @@ -31,15 +33,17 @@ class Classifier(ABC): """ Base class for all classifiers. """ - def __init__(self, clip_values, channel_index, defences=None, preprocessing=(0, 1)): + def __init__(self, channel_index, clip_values=None, defences=None, preprocessing=(0, 1)): """ Initialize a `Classifier` object. - :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed - for features. - :type clip_values: `tuple` :param channel_index: Index of the axis in data containing the color channels or features. :type channel_index: `int` + :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :type clip_values: `tuple` :param defences: Defence(s) to be activated with the classifier. 
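        Illustrative sketch of the relaxed check above (shapes assumed): per-feature
        bounds are now accepted because the comparison is elementwise, e.g.

            clip_values = (np.zeros(28 * 28), np.ones(28 * 28))
            assert not np.array(clip_values[0] >= clip_values[1]).any()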
:type defences: :class:`.Preprocessor` or `list(Preprocessor)` instances :param preprocessing: Tuple of the form `(substractor, divider)` of floats or `np.ndarray` of values to be @@ -49,10 +53,12 @@ def __init__(self, clip_values, channel_index, defences=None, preprocessing=(0, """ from art.defences.preprocessor import Preprocessor - if len(clip_values) != 2: - raise ValueError('`clip_values` should be a tuple of 2 floats containing the allowed data range.') - if clip_values[0] >= clip_values[1]: - raise ValueError('Invalid `clip_values`: min >= max.') + if clip_values is not None: + if len(clip_values) != 2: + raise ValueError('`clip_values` should be a tuple of 2 floats or arrays containing the allowed' + 'data range.') + if np.array(clip_values[0] >= clip_values[1]).any(): + raise ValueError('Invalid `clip_values`: min >= max.') self._clip_values = clip_values self._channel_index = channel_index @@ -355,8 +361,8 @@ def _apply_processing_gradient(self, grad): return res def __repr__(self): - repr_ = "%s(clip_values=%r, channel_index=%r, defences=%r, preprocessing=%r)" \ + repr_ = "%s(channel_index=%r, clip_values=%r, defences=%r, preprocessing=%r)" \ % (self.__module__ + '.' + self.__class__.__name__, - self.clip_values, self.channel_index, self.defences, self.preprocessing) + self.channel_index, self.clip_values, self.defences, self.preprocessing) return repr_ diff --git a/art/classifiers/ensemble.py b/art/classifiers/ensemble.py index a51dd759e9..446d5843ba 100644 --- a/art/classifiers/ensemble.py +++ b/art/classifiers/ensemble.py @@ -31,15 +31,12 @@ class EnsembleClassifier(Classifier): Class allowing to aggregate multiple classifiers as an ensemble. The individual classifiers are expected to be trained when the ensemble is created and no training procedures are provided through this class. """ - def __init__(self, clip_values, classifiers, classifier_weights=None, channel_index=3, defences=None, + def __init__(self, classifiers, classifier_weights=None, channel_index=3, clip_values=None, defences=None, preprocessing=(0, 1)): """ Initialize a :class:`.EnsembleClassifier` object. The data range values and colour channel index have to be consistent for all the classifiers in the ensemble. - :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed - for features. - :type clip_values: `tuple` :param classifiers: List of :class:`.Classifier` instances to be ensembled together. :type classifiers: `list` :param classifier_weights: List of weights, one scalar per classifier, to assign to their prediction when @@ -47,6 +44,11 @@ def __init__(self, clip_values, classifiers, classifier_weights=None, channel_in :type classifier_weights: `list` or `np.ndarray` or `None` :param channel_index: Index of the axis in data containing the color channels or features. :type channel_index: `int` + :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :type clip_values: `tuple` :param defences: Defences to be activated with the classifier. 
:type defences: `str` or `list(str)` :param preprocessing: Tuple of the form `(substractor, divider)` of floats or `np.ndarray` of values to be @@ -80,7 +82,6 @@ def __init__(self, clip_values, classifiers, classifier_weights=None, channel_in self._input_shape = classifiers[0].input_shape self._nb_classes = classifiers[0].nb_classes - self._clip_values = clip_values # Set weights for classifiers if classifier_weights is None: @@ -243,10 +244,10 @@ def set_learning_phase(self, train): self._learning_phase = train def __repr__(self): - repr_ = "%s(clip_values=%r, classifiers=%r, classifier_weights=%r, channel_index=%r, defences=%r, " \ + repr_ = "%s(classifiers=%r, classifier_weights=%r, channel_index=%r, clip_values=%r, defences=%r, " \ "preprocessing=%r)" \ % (self.__module__ + '.' + self.__class__.__name__, - self.clip_values, self._classifiers, self._classifier_weights, self.channel_index, self.defences, + self._classifiers, self._classifier_weights, self.channel_index, self.clip_values, self.defences, self.preprocessing) return repr_ diff --git a/art/classifiers/keras.py b/art/classifiers/keras.py index de0dc0028e..88336b4eb2 100644 --- a/art/classifiers/keras.py +++ b/art/classifiers/keras.py @@ -31,20 +31,22 @@ class KerasClassifier(Classifier): """ Wrapper class for importing Keras models. The supported backends for Keras are TensorFlow and Theano. """ - def __init__(self, clip_values, model, use_logits=False, channel_index=3, defences=None, preprocessing=(0, 1), + def __init__(self, model, use_logits=False, channel_index=3, clip_values=None, defences=None, preprocessing=(0, 1), input_layer=0, output_layer=0, custom_activation=False): """ Create a `Classifier` instance from a Keras model. Assumes the `model` passed as argument is compiled. - :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed - for features. - :type clip_values: `tuple` :param model: Keras model :type model: `keras.models.Model` :param use_logits: True if the output of the model are the logits. :type use_logits: `bool` :param channel_index: Index of the axis in data containing the color channels or features. :type channel_index: `int` + :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :type clip_values: `tuple` :param defences: Defences to be activated with the classifier. :type defences: :class:`.Preprocessor` or `list(Preprocessor)` instances :param preprocessing: Tuple of the form `(substractor, divider)` of floats or `np.ndarray` of values to be @@ -549,10 +551,10 @@ def __setstate__(self, state): state['_custom_activation']) def __repr__(self): - repr_ = "%s(clip_values=%r, model=%r, use_logits=%r, channel_index=%r, defences=%r, preprocessing=%r, " \ + repr_ = "%s(model=%r, use_logits=%r, channel_index=%r, clip_values=%r, defences=%r, preprocessing=%r, " \ "input_layer=%r, output_layer=%r, custom_activation=%r)" \ % (self.__module__ + '.' 
+ self.__class__.__name__, - self.clip_values, self._model, self._use_logits, self.channel_index, self.defences, + self._model, self._use_logits, self.channel_index, self.clip_values, self.defences, self.preprocessing, self._input_layer, self._output_layer, self._custom_activation) return repr_ diff --git a/art/classifiers/mxnet.py b/art/classifiers/mxnet.py index dd477fa687..c540b3dfc2 100644 --- a/art/classifiers/mxnet.py +++ b/art/classifiers/mxnet.py @@ -22,6 +22,7 @@ import numpy as np import six +from art import NUMPY_DTYPE from art.classifiers import Classifier logger = logging.getLogger(__name__) @@ -31,15 +32,12 @@ class MXClassifier(Classifier): """ Wrapper class for importing MXNet Gluon model. """ - def __init__(self, clip_values, model, input_shape, nb_classes, optimizer=None, ctx=None, channel_index=1, + def __init__(self, model, input_shape, nb_classes, optimizer=None, ctx=None, channel_index=1, clip_values=None, defences=None, preprocessing=(0, 1)): """ Initialize an `MXClassifier` object. Assumes the `model` passed as parameter is a Gluon model and that the loss function is the softmax cross-entropy. - :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed - for features. - :type clip_values: `tuple` :param model: The model with logits as expected output. :type model: `mxnet.gluon.Block` :param input_shape: The shape of one input instance. @@ -53,6 +51,11 @@ def __init__(self, clip_values, model, input_shape, nb_classes, optimizer=None, :type ctx: `mxnet.context.Context` :param channel_index: Index of the axis in data containing the color channels or features. :type channel_index: `int` + :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :type clip_values: `tuple` :param defences: Defences to be activated with the classifier. :type defences: `str` or `list(str)` :param preprocessing: Tuple of the form `(substractor, divider)` of floats or `np.ndarray` of values to be @@ -117,7 +120,8 @@ def fit(self, x, y, batch_size=128, nb_epochs=20, **kwargs): # Train for one epoch for m in range(nb_batch): - x_batch = mx.nd.array(x_preproc[ind[m * batch_size:(m + 1) * batch_size]]).as_in_context(self._ctx) + x_batch = mx.nd.array(x_preproc[ind[m * batch_size:(m + 1) * batch_size]].astype(NUMPY_DTYPE)) \ + .as_in_context(self._ctx) y_batch = mx.nd.array(y_preproc[ind[m * batch_size:(m + 1) * batch_size]]).as_in_context(self._ctx) with mx.autograd.record(train_mode=train_mode): @@ -146,12 +150,13 @@ def fit_generator(self, generator, nb_epochs=20, **kwargs): train_mode = self._learning_phase if hasattr(self, '_learning_phase') else True + # TODO fix fit_generator w.r.t. 
defenses if isinstance(generator, MXDataGenerator) and \ not (hasattr(self, 'label_smooth') or hasattr(self, 'feature_squeeze')): # Train directly in MXNet for _ in range(nb_epochs): for x_batch, y_batch in generator.data_loader: - x_batch = mx.nd.array(x_batch).as_in_context(self._ctx) + x_batch = mx.nd.array(x_batch.astype(NUMPY_DTYPE)).as_in_context(self._ctx) y_batch = mx.nd.argmax(y_batch, axis=1) y_batch = mx.nd.array(y_batch).as_in_context(self._ctx) @@ -195,7 +200,7 @@ def predict(self, x, logits=False, batch_size=128): begin, end = m * batch_size, min((m + 1) * batch_size, x_preproc.shape[0]) # Predict - x_batch = mx.nd.array(x_preproc[begin:end], ctx=self._ctx) + x_batch = mx.nd.array(x_preproc[begin:end].astype(NUMPY_DTYPE), ctx=self._ctx) x_batch.attach_grad() with mx.autograd.record(train_mode=train_mode): preds = self._model(x_batch) @@ -237,7 +242,7 @@ def class_gradient(self, x, label=None, logits=False): x_preproc = self._apply_processing(x) x_defences, _ = self._apply_defences(x_preproc, None, fit=False) - x_defences = mx.nd.array(x_defences, ctx=self._ctx) + x_defences = mx.nd.array(x_defences.astype(NUMPY_DTYPE), ctx=self._ctx) x_defences.attach_grad() if label is None: @@ -308,7 +313,7 @@ def loss_gradient(self, x, y): x_preproc = self._apply_processing(x) x_defences, y_defences = self._apply_defences(x_preproc, y, fit=False) y_defences = mx.nd.array([np.argmax(y_defences, axis=1)]).T - x_defences = mx.nd.array(x_defences, ctx=self._ctx) + x_defences = mx.nd.array(x_defences.astype(NUMPY_DTYPE), ctx=self._ctx) x_defences.attach_grad() loss = mx.gluon.loss.SoftmaxCrossEntropyLoss() @@ -386,7 +391,7 @@ def get_activations(self, x, layer, batch_size=128): begin, end = batch_index * batch_size, min((batch_index + 1) * batch_size, x_preproc.shape[0]) # Predict - x_batch = mx.nd.array(x_preproc[begin:end], ctx=self._ctx) + x_batch = mx.nd.array(x_preproc[begin:end].astype(NUMPY_DTYPE), ctx=self._ctx) x_batch.attach_grad() with mx.autograd.record(train_mode=train_mode): preds = self._model[layer_ind](x_batch) @@ -434,11 +439,11 @@ def save(self, filename, path=None): logger.info("Model parameters saved in path: %s.params.", full_path) def __repr__(self): - repr_ = "%s(clip_values=%r, model=%r, input_shape=%r, nb_classes=%r, optimizer=%r, ctx=%r, channel_index=%r, " \ - "defences=%r, preprocessing=%r)" \ + repr_ = "%s(model=%r, input_shape=%r, nb_classes=%r, optimizer=%r, ctx=%r, channel_index=%r, " \ + "clip_values=%r, defences=%r, preprocessing=%r)" \ % (self.__module__ + '.' + self.__class__.__name__, - self.clip_values, self._model, self.input_shape, self.nb_classes, self._optimizer, self._ctx, - self.channel_index, self.defences, self.preprocessing) + self._model, self.input_shape, self.nb_classes, self._optimizer, self._ctx, + self.channel_index, self.clip_values, self.defences, self.preprocessing) return repr_ diff --git a/art/classifiers/pytorch.py b/art/classifiers/pytorch.py index a21adc9001..ac34d92087 100644 --- a/art/classifiers/pytorch.py +++ b/art/classifiers/pytorch.py @@ -32,14 +32,11 @@ class PyTorchClassifier(Classifier): """ This class implements a classifier with the PyTorch framework. """ - def __init__(self, clip_values, model, loss, optimizer, input_shape, nb_classes, channel_index=1, defences=None, - preprocessing=(0, 1)): + def __init__(self, model, loss, optimizer, input_shape, nb_classes, channel_index=1, clip_values=None, + defences=None, preprocessing=(0, 1)): """ Initialization specifically for the PyTorch-based implementation. 
- :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed - for features. - :type clip_values: `tuple` :param model: PyTorch model. The forward function of the model must return the logit output. :type model: is instance of `torch.nn.Module` :param loss: The loss function for which to compute gradients for training. The target label must be raw @@ -53,6 +50,11 @@ def __init__(self, clip_values, model, loss, optimizer, input_shape, nb_classes, :type nb_classes: `int` :param channel_index: Index of the axis in data containing the color channels or features. :type channel_index: `int` + :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :type clip_values: `tuple` :param defences: Defences to be activated with the classifier. :type defences: `str` or `list(str)` :param preprocessing: Tuple of the form `(substractor, divider)` of floats or `np.ndarray` of values to be @@ -153,12 +155,9 @@ def fit(self, x, y, batch_size=128, nb_epochs=10, **kwargs): # Train for one epoch for m in range(num_batch): - i_batch = torch.from_numpy(x_preproc[ind[m * batch_size:(m + 1) * batch_size]]).to(self._device) + i_batch = torch.from_numpy(x_preproc[ind[m * batch_size:(m + 1) * batch_size]]).to(self._device).float() o_batch = torch.from_numpy(y_preproc[ind[m * batch_size:(m + 1) * batch_size]]).to(self._device) - # Cast to float - i_batch = i_batch.float() - # Zero the parameter gradients self._optimizer.zero_grad() @@ -239,7 +238,7 @@ def class_gradient(self, x, label=None, logits=False): # Convert the inputs to Tensors x_preproc = self._apply_processing(x) x_defences, _ = self._apply_defences(x_preproc, None, fit=False) - x_defences = torch.from_numpy(x_defences).to(self._device) + x_defences = torch.from_numpy(x_defences).to(self._device).float() # Compute gradient wrt what layer_idx = self._init_grads() @@ -446,11 +445,11 @@ def save(self, filename, path=None): logger.info("Optimizer state dict saved in path: %s.", full_path + '.optimizer') def __repr__(self): - repr_ = "%s(clip_values=%r, model=%r, loss=%r, optimizer=%r, input_shape=%r, nb_classes=%r, " \ - "channel_index=%r, defences=%r, preprocessing=%r)" \ + repr_ = "%s(model=%r, loss=%r, optimizer=%r, input_shape=%r, nb_classes=%r, " \ + "channel_index=%r, clip_values=%r, defences=%r, preprocessing=%r)" \ % (self.__module__ + '.' + self.__class__.__name__, - self.clip_values, self._model, self._loss, self._optimizer, self._input_shape, self.nb_classes, - self.channel_index, self.defences, self.preprocessing) + self._model, self._loss, self._optimizer, self._input_shape, self.nb_classes, + self.channel_index, self.clip_values, self.defences, self.preprocessing) return repr_ diff --git a/art/classifiers/tensorflow.py b/art/classifiers/tensorflow.py index 2e8583d7e8..a4c6f24954 100644 --- a/art/classifiers/tensorflow.py +++ b/art/classifiers/tensorflow.py @@ -15,14 +15,11 @@ class TFClassifier(Classifier): """ This class implements a classifier with the Tensorflow framework. 
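    Hedged usage sketch of the reordered constructor, with `clip_values` now an optional
    keyword argument (placeholders, loss and session assumed to be defined elsewhere):

        classifier = TFClassifier(input_ph=input_ph, logits=logits, output_ph=output_ph,
                                  train=train_op, loss=loss, sess=sess,
                                  clip_values=(0.0, 1.0))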
""" - def __init__(self, clip_values, input_ph, logits, output_ph=None, train=None, loss=None, learning=None, sess=None, - channel_index=3, defences=None, preprocessing=(0, 1)): + def __init__(self, input_ph, logits, output_ph=None, train=None, loss=None, learning=None, sess=None, + channel_index=3, clip_values=None, defences=None, preprocessing=(0, 1)): """ - Initialization specifically for the Tensorflow-based implementation. + Initialization specific to Tensorflow models implementation. - :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed - for features. - :type clip_values: `tuple` :param input_ph: The input placeholder. :type input_ph: `tf.Placeholder` :param logits: The logits layer of the model. @@ -42,6 +39,11 @@ def __init__(self, clip_values, input_ph, logits, output_ph=None, train=None, lo :type sess: `tf.Session` :param channel_index: Index of the axis in data containing the color channels or features. :type channel_index: `int` + :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and + maximum values allowed for features. If floats are provided, these will be used as the range of all + features. If arrays are provided, each value will be considered the bound for a feature, thus + the shape of clip values needs to match the total number of features. + :type clip_values: `tuple` :param defences: Defences to be activated with the classifier. :type defences: `str` or `list(str)` :param preprocessing: Tuple of the form `(substractor, divider)` of floats or `np.ndarray` of values to be @@ -489,10 +491,10 @@ def save(self, filename, path=None): logger.info('Model saved in path: %s.', full_path) def __repr__(self): - repr_ = "%s(clip_values=%r, input_ph=%r, logits=%r, output_ph=%r, train=%r, loss=%r, learnign=%r, " \ - "sess=%r, channel_index=%r, defences=%r, preprocessing=%r)" \ + repr_ = "%s(input_ph=%r, logits=%r, output_ph=%r, train=%r, loss=%r, learnign=%r, " \ + "sess=%r, channel_index=%r, clip_values=%r, defences=%r, preprocessing=%r)" \ % (self.__module__ + '.' 
+ self.__class__.__name__, - self.clip_values, self._input_ph, self._logits, self._output_ph, self._train, self._loss, - self._learning, self._sess, self.channel_index, self.defences, self.preprocessing) + self._input_ph, self._logits, self._output_ph, self._train, self._loss, self._learning, self._sess, + self.channel_index, self.clip_values, self.defences, self.preprocessing) return repr_ diff --git a/art/defences/feature_squeezing.py b/art/defences/feature_squeezing.py index e85d414709..e52d86b5cf 100644 --- a/art/defences/feature_squeezing.py +++ b/art/defences/feature_squeezing.py @@ -95,11 +95,11 @@ def set_params(self, **kwargs): super(FeatureSqueezing, self).set_params(**kwargs) if not isinstance(self.bit_depth, (int, np.int)) or self.bit_depth <= 0 or self.bit_depth > 64: - raise ValueError("The bit depth must be between 1 and 64.") + raise ValueError('The bit depth must be between 1 and 64.') if len(self.clip_values) != 2: raise ValueError('`clip_values` should be a tuple of 2 floats containing the allowed data range.') - if self.clip_values[0] >= self.clip_values[1]: + if np.array(self.clip_values[0] >= self.clip_values[1]).any(): raise ValueError('Invalid `clip_values`: min >= max.') return True diff --git a/art/defences/gaussian_augmentation.py b/art/defences/gaussian_augmentation.py index 042ec054b9..df67a1efed 100644 --- a/art/defences/gaussian_augmentation.py +++ b/art/defences/gaussian_augmentation.py @@ -82,7 +82,7 @@ def __call__(self, x, y=None): indices = np.random.randint(0, x.shape[0], size=size) # Generate noisy samples - x_aug = np.random.normal(x[indices], scale=self.sigma, size=(size,) + x[indices].shape[1:]) + x_aug = np.random.normal(x[indices], scale=self.sigma, size=(size,) + x.shape[1:]) x_aug = np.vstack((x, x_aug)) logger.info('Augmented dataset size: %d', x_aug.shape[0]) diff --git a/art/defences/jpeg_compression.py b/art/defences/jpeg_compression.py index cb1520112b..9a00962d61 100644 --- a/art/defences/jpeg_compression.py +++ b/art/defences/jpeg_compression.py @@ -69,12 +69,17 @@ def __call__(self, x, y=None): :return: compressed sample. :rtype: `np.ndarray` """ - clip_values = (0, 1) + if len(x.shape) == 2: + raise ValueError('Feature vectors detected. JPEG compression can only be applied to data with spatial' + 'dimensions.') + + if self.channel_index >= len(x.shape): + raise ValueError('Channel index does not match input shape.') - assert self.channel_index < len(x.shape) + clip_values = (0, 1) # Swap channel index - if self.channel_index < 3: + if self.channel_index < 3 and len(x.shape) == 4: x_ = np.swapaxes(x, self.channel_index, 3) else: x_ = x.copy() @@ -143,7 +148,7 @@ def set_params(self, **kwargs): if not isinstance(self.quality, (int, np.int)) or self.quality <= 0 or self.quality > 100: logger.error('Image quality must be a positive integer <= 100.') - raise ValueError('Image quality must be a positive integer <= 100..') + raise ValueError('Image quality must be a positive integer <= 100.') if not isinstance(self.channel_index, (int, np.int)) or self.channel_index <= 0: logger.error('Data channel must be a positive integer. The batch dimension is not a valid channel.') diff --git a/art/defences/pixel_defend.py b/art/defences/pixel_defend.py index 4615ccd94c..be86dbed75 100644 --- a/art/defences/pixel_defend.py +++ b/art/defences/pixel_defend.py @@ -70,43 +70,41 @@ def __call__(self, x, y=None): :return: Purified sample. 
:rtype: `np.ndarray` """ - clip_values = (0, 1) - # Convert into `uint8` + original_shape = x.shape x_ = x.copy() + probs = self.pixel_cnn.get_activations(x_, layer=-1).reshape((x_.shape[0], -1, 256)) x_ = x_ * 255 x_ = x_.astype("uint8") + x_ = x_.reshape((x_.shape[0], -1)) # Start defence one image at a time for i, xi in enumerate(x_): - for r in range(x_.shape[1]): - for c in range(x_.shape[2]): - for k in range(x_.shape[3]): - # Setup the search space - # probs = self.pixel_cnn.predict(np.array([xi / 255.0]), logits=False) - probs = self.pixel_cnn.get_activations(np.array([xi / 255.0]), -1) - f_probs = probs[0, r, c, k] - f_range = range(int(max(xi[r, c, k] - self.eps, 0)), int(min(xi[r, c, k] + self.eps, 255) + 1)) - - # Look in the search space - best_prob = -1 - best_idx = -1 - for idx in f_range: - if f_probs[idx] > best_prob: - best_prob = f_probs[idx] - best_idx = idx - - # Update result - xi[r, c, k] = best_idx + for feat_index in range(x_.shape[1]): + # Setup the search space + f_probs = probs[i, feat_index, :] + f_range = range(int(max(xi[feat_index] - self.eps, 0)), int(min(xi[feat_index] + self.eps, 255) + 1)) + + # Look in the search space + best_prob = -1 + best_idx = -1 + for idx in f_range: + if f_probs[idx] > best_prob: + best_prob = f_probs[idx] + best_idx = idx + + # Update result + xi[feat_index] = best_idx # Update in batch x_[i] = xi # Convert to old dtype x_ = x_ / 255.0 - x_ = x_.astype(NUMPY_DTYPE) + x_ = x_.astype(NUMPY_DTYPE).reshape(original_shape) # Clip values into the range [0, 1] + clip_values = (0, 1) x_ = np.clip(x_, clip_values[0], clip_values[1]) return x_, y diff --git a/art/defences/spatial_smoothing.py b/art/defences/spatial_smoothing.py index 315917f5a5..1a2953496d 100644 --- a/art/defences/spatial_smoothing.py +++ b/art/defences/spatial_smoothing.py @@ -32,9 +32,9 @@ class SpatialSmoothing(Preprocessor): """ Implement the local spatial smoothing defence approach. Defence method from https://arxiv.org/abs/1704.01155. """ - params = ['window_size', 'channel_index'] + params = ['window_size', 'channel_index', 'clip_values'] - def __init__(self, window_size=3, channel_index=3): + def __init__(self, window_size=3, channel_index=3, clip_values=None): """ Create an instance of local spatial smoothing. @@ -42,10 +42,13 @@ def __init__(self, window_size=3, channel_index=3): :type window_size: `int` :param channel_index: Index of the axis in data containing the color channels or features. :type channel_index: `int` + :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed + for features. + :type clip_values: `tuple` """ super(SpatialSmoothing, self).__init__() self._is_fitted = True - self.set_params(window_size=window_size, channel_index=channel_index) + self.set_params(window_size=window_size, channel_index=channel_index, clip_values=clip_values) @property def apply_fit(self): @@ -66,15 +69,19 @@ def __call__(self, x, y=None): :return: Smoothed sample :rtype: `np.ndarray` """ - clip_values = (0, 1) + if len(x.shape) == 2: + raise ValueError('Feature vectors detected. 
Smoothing can only be applied to data with spatial ' + 'dimensions.') + if self.channel_index >= len(x.shape): + raise ValueError('Channel index does not match input shape.') - assert self.channel_index < len(x.shape) size = [1] + [self.window_size] * (len(x.shape) - 1) size[self.channel_index] = 1 size = tuple(size) result = ndimage.filters.median_filter(x, size=size, mode="reflect") - result = np.clip(result, clip_values[0], clip_values[1]) + if hasattr(self, 'clip_values') and self.clip_values is not None: + np.clip(result, self.clip_values[0], self.clip_values[1], out=result) return result.astype(NUMPY_DTYPE), y @@ -95,6 +102,9 @@ def set_params(self, **kwargs): :type window_size: `int` :param channel_index: Index of the axis in data containing the color channels or features. :type channel_index: `int` + :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed + for features. + :type clip_values: `tuple` """ # Save attack-specific parameters super(SpatialSmoothing, self).set_params(**kwargs) @@ -109,4 +119,10 @@ def set_params(self, **kwargs): raise ValueError('Data channel for smoothing must be a positive integer. The batch dimension is not a' 'valid channel.') + if self.clip_values is not None: + if len(self.clip_values) != 2: + raise ValueError('`clip_values` should be a tuple of 2 floats containing the allowed data range.') + if np.array(self.clip_values[0] >= self.clip_values[1]).any(): + raise ValueError('Invalid `clip_values`: min >= max.') + return True diff --git a/art/defences/thermometer_encoding.py b/art/defences/thermometer_encoding.py index 1ea5f652c4..17ce0ec387 100644 --- a/art/defences/thermometer_encoding.py +++ b/art/defences/thermometer_encoding.py @@ -32,21 +32,23 @@ class ThermometerEncoding(Preprocessor): """ Implement the thermometer encoding defence approach. Defence method from https://openreview.net/forum?id=S18Su--CW. """ - params = ['num_space', 'clip_values'] + params = ['num_space', 'channel_index', 'clip_values'] - def __init__(self, num_space=10, clip_values=(0, 1)): + def __init__(self, num_space=10, channel_index=3, clip_values=None): """ Create an instance of thermometer encoding. :param num_space: Number of evenly spaced levels within [0, 1]. :type num_space: `int` + :param channel_index: Index of the axis in data containing the color channels or features. + :type channel_index: `int` :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed for features. :type clip_values: `tuple` """ super(ThermometerEncoding, self).__init__() self._is_fitted = True - self.set_params(num_space=num_space, clip_values=clip_values) + self.set_params(num_space=num_space, channel_index=channel_index, clip_values=clip_values) @property def apply_fit(self): @@ -58,7 +60,7 @@ def apply_predict(self): def __call__(self, x, y=None): """ - Apply thermometer encoding to sample `x`. + Apply thermometer encoding to sample `x`. The new axis with the encoding is added as last dimension. :param x: Sample to encode with shape `(batch_size, width, height, depth)`. :type x: `np.ndarray` @@ -67,12 +69,11 @@ def __call__(self, x, y=None): :return: Encoded sample with shape `(batch_size, width, height, depth x num_space)`. 
:rtype: `np.ndarray` """ - result = [] - for c in range(x.shape[-1]): - result.append(self._perchannel(x[:, :, :, c])) + result = np.apply_along_axis(self._perchannel, self.channel_index, x) - result = np.concatenate(result, axis=3) - result = np.clip(result, self.clip_values[0], self.clip_values[1]) + # result = np.concatenate(result, axis=self.channel_index) + if hasattr(self, 'clip_values') and self.clip_values is not None: + np.clip(result, self.clip_values[0], self.clip_values[1], out=result) return result.astype(NUMPY_DTYPE), y @@ -94,9 +95,7 @@ def _perchannel(self, x): for i in reversed(range(1, self.num_space)): onehot_rep[:, i] += np.sum(onehot_rep[:, :i], axis=1) - result = onehot_rep.reshape(list(x.shape) + [self.num_space]) - - return result + return onehot_rep.flatten() def estimate_gradient(self, x, grad): """ @@ -112,9 +111,10 @@ def estimate_gradient(self, x, grad): :return: The gradient (estimate) of the defence. :rtype: `np.ndarray` """ - thermometer_grad = np.zeros(x.shape + (self.num_space,)) + thermometer_grad = np.zeros(x.shape[:-1] + (x.shape[-1] * self.num_space,)) mask = np.array([x > k / self.num_space for k in range(self.num_space)]) mask = np.moveaxis(mask, 0, -1) + mask = mask.reshape(thermometer_grad.shape) thermometer_grad[mask] = 1 return grad * thermometer_grad @@ -131,6 +131,8 @@ def set_params(self, **kwargs): :param num_space: Number of evenly spaced levels within [0, 1]. :type num_space: `int` + :param channel_index: Index of the axis in data containing the color channels or features. + :type channel_index: `int` :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed for features. :type clip_values: `tuple` @@ -142,9 +144,10 @@ def set_params(self, **kwargs): logger.error('Number of evenly spaced levels must be a positive integer.') raise ValueError('Number of evenly spaced levels must be a positive integer.') - if len(self.clip_values) != 2: - raise ValueError('`clip_values` should be a tuple of 2 floats containing the allowed data range.') - if self.clip_values[0] >= self.clip_values[1]: - raise ValueError('Invalid `clip_values`: min >= max.') + if hasattr(self, 'clip_values') and self.clip_values is not None: + if len(self.clip_values) != 2: + raise ValueError('`clip_values` should be a tuple of 2 floats containing the allowed data range.') + if np.array(self.clip_values[0] >= self.clip_values[1]).any(): + raise ValueError('Invalid `clip_values`: min >= max.') return True diff --git a/art/defences/variance_minimization.py b/art/defences/variance_minimization.py index 25ecf330f9..860d46e9fe 100644 --- a/art/defences/variance_minimization.py +++ b/art/defences/variance_minimization.py @@ -76,16 +76,21 @@ def __call__(self, x, y=None): :return: Similar samples. :rtype: `np.ndarray` """ - x_ = x.copy() + if len(x.shape) == 2: + raise ValueError('Feature vectors detected. 
Variance minimization can only be applied to data with spatial' + 'dimensions.') - # Minimize one image at a time - for i, xi in enumerate(x_): - mask = (np.random.rand(xi.shape[0], xi.shape[1], xi.shape[2]) < self.prob).astype('int') - x_[i] = self._minimize(xi, mask) + x_preproc = x.copy() - x_ = np.clip(x_, self.clip_values[0], self.clip_values[1]) + # Minimize one input at a time + for i, xi in enumerate(x_preproc): + mask = (np.random.rand(*xi.shape) < self.prob).astype('int') + x_preproc[i] = self._minimize(xi, mask) - return x_.astype(NUMPY_DTYPE), y + if hasattr(self, 'clip_values') and self.clip_values is not None: + np.clip(x_preproc, self.clip_values[0], self.clip_values[1], out=x_preproc) + + return x_preproc.astype(NUMPY_DTYPE), y def estimate_gradient(self, x, grad): return grad @@ -230,7 +235,7 @@ def set_params(self, **kwargs): if len(self.clip_values) != 2: raise ValueError('`clip_values` should be a tuple of 2 floats containing the allowed data range.') - if self.clip_values[0] >= self.clip_values[1]: + if np.array(self.clip_values[0] >= self.clip_values[1]).any(): raise ValueError('Invalid `clip_values`: min >= max.') return True diff --git a/art/metrics.py b/art/metrics.py index 1f245c22bc..82e990eb9f 100644 --- a/art/metrics.py +++ b/art/metrics.py @@ -284,7 +284,8 @@ def clever_t(classifier, x, target_class, nb_batches, batch_size, radius, norm, shape) rand_pool += np.repeat(np.array([x]), pool_factor * batch_size, 0) rand_pool = rand_pool.astype(NUMPY_DTYPE) - np.clip(rand_pool, classifier.clip_values[0], classifier.clip_values[1], out=rand_pool) + if hasattr(classifier, 'clip_values') and classifier.clip_values is not None: + np.clip(rand_pool, classifier.clip_values[0], classifier.clip_values[1], out=rand_pool) # Change norm since q = p / (p-1) if norm == 1: diff --git a/art/utils.py b/art/utils.py index 26308fb967..418f73be46 100644 --- a/art/utils.py +++ b/art/utils.py @@ -299,27 +299,51 @@ def get_labels_np_array(preds): return y -def preprocess(x, y, nb_classes=10, max_value=255): +# TODO change preprocess to normalize feature-wise? +def preprocess(x, y, nb_classes=10, clip_values=None): """Scales `x` to [0, 1] and converts `y` to class categorical confidences. - :param x: Data instances + :param x: Data instances. :type x: `np.ndarray` - :param y: Labels + :param y: Labels. :type y: `np.ndarray` - :param nb_classes: Number of classes in dataset + :param nb_classes: Number of classes in dataset. :type nb_classes: `int` - :param max_value: Original maximum allowed value for features - :type max_value: `int` - :return: rescaled values of `x`, `y` + :param clip_values: Original data range allowed value for features, either one respective scalar or one value per + feature. + :type clip_values: `tuple(float, float)` or `tuple(np.ndarray, np.ndarray)` + :return: Rescaled values of `x`, `y` :rtype: `tuple` """ - x = x.astype('float32') / max_value - y = to_categorical(y, nb_classes) + if clip_values is None: + min_, max_ = np.amin(x), np.amax(x) + else: + min_, max_ = clip_values + + normalized_x = (x - min_) / (max_ - min_) + categorical_y = to_categorical(y, nb_classes) - return x, y + return normalized_x, categorical_y def compute_success(classifier, x_clean, labels, x_adv, targeted=False): + """ + Compute the success rate of an attack based on clean samples, adversarial samples and targets or correct labels. + + :param classifier: Classifier used for prediction. + :type classifier: :class:`.Classifier` + :param x_clean: Original clean samples. 
+    :type x_clean: `np.ndarray`
+    :param labels: Correct labels of `x_clean` if the attack is untargeted, or target labels of the attack otherwise.
+    :type labels: `np.ndarray`
+    :param x_adv: Adversarial samples to be evaluated.
+    :type x_adv: `np.ndarray`
+    :param targeted: `True` if the attack is targeted. In that case, `labels` are treated as target classes instead of
+           correct labels of the clean samples.
+    :type targeted: `bool`
+    :return: Percentage of successful adversarial samples.
+    :rtype: `float`
+    """
     adv_preds = np.argmax(classifier.predict(x_adv), axis=1)
     if targeted:
         rate = np.sum(adv_preds == np.argmax(labels, axis=1)) / x_adv.shape[0]
@@ -395,8 +419,8 @@ def load_batch(fpath):
     min_, max_ = 0, 255
     if not raw:
         min_, max_ = 0., 1.
-        x_train, y_train = preprocess(x_train, y_train)
-        x_test, y_test = preprocess(x_test, y_test)
+        x_train, y_train = preprocess(x_train, y_train, clip_values=(0, 255))
+        x_test, y_test = preprocess(x_test, y_test, clip_values=(0, 255))
 
     return (x_train, y_train), (x_test, y_test), min_, max_
 
@@ -474,6 +498,61 @@ def load_stl():
     return (x_train, y_train), (x_test, y_test), min_, max_
 
 
+def load_iris(raw=False, test_set=.3):
+    """
+    Loads the UCI Iris dataset from `DATA_PATH` or downloads it if necessary.
+
+    :param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to [0, 1].
+    :type raw: `bool`
+    :param test_set: Proportion of the data to use as validation split. The value should be between 0 and 1.
+    :type test_set: `float`
+    :return: Entire dataset as `(x_train, y_train), (x_test, y_test)` together with the data range `(min, max)`.
+    :rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
+    """
+    from art import DATA_PATH, NUMPY_DTYPE
+
+    # Download data if needed
+    path = get_file('iris.data', path=DATA_PATH, extract=False,
+                    url='https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')
+
+    data = np.loadtxt(path, delimiter=',', usecols=(0, 1, 2, 3), dtype=NUMPY_DTYPE)
+    labels = np.loadtxt(path, delimiter=',', usecols=4, dtype=str)
+
+    # Preprocess
+    if not raw:
+        label_map = {
+            'Iris-setosa': 0,
+            'Iris-versicolor': 1,
+            'Iris-virginica': 2
+        }
+        labels = np.array([label_map[labels[i]] for i in range(labels.size)], dtype=np.int32)
+        data, labels = preprocess(data, labels, nb_classes=3)
+    min_, max_ = np.amin(data), np.amax(data)
+
+    # Split training and test sets
+    split_index = int((1 - test_set) * len(data) / 3)
+    x_train = np.vstack((data[:split_index], data[50:50+split_index], data[100:100+split_index]))
+    y_train = np.vstack((labels[:split_index], labels[50:50+split_index], labels[100:100+split_index]))
+
+    if split_index >= 49:
+        x_test, y_test = None, None
+    else:
+
+        x_test = np.vstack((data[split_index:50], data[50+split_index:100], data[100+split_index:]))
+        y_test = np.vstack((labels[split_index:50], labels[50+split_index:100], labels[100+split_index:]))
+        assert len(x_train) + len(x_test) == 150
+
+        # Shuffle test set
+        random_indices = np.random.permutation(len(y_test))
+        x_test, y_test = x_test[random_indices], y_test[random_indices]
+
+    # Shuffle training set
+    random_indices = np.random.permutation(len(y_train))
+    x_train, y_train = x_train[random_indices], y_train[random_indices]
+
+    return (x_train, y_train), (x_test, y_test), min_, max_
+
+
 def load_dataset(name):
     """
     Loads or downloads the dataset corresponding to `name`. Options are: `mnist`, `cifar10` and `stl10`.
@@ -484,13 +563,14 @@ def load_dataset(name):
     :rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
     :raises NotImplementedError: If the dataset is unknown.
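+
+    Example, as a rough usage sketch for the new `iris` option (it assumes this patch is applied and that the UCI
+    download above is reachable; shapes follow the `load_iris` implementation)::
+
+        from art.utils import load_dataset
+
+        (x_train, y_train), (x_test, y_test), min_, max_ = load_dataset('iris')
+        assert x_train.shape[1] == 4 and y_train.shape[1] == 3   # 4 features, one-hot labels for 3 classes
+        print(min_, max_)   # data range of the normalized features, here (0.0, 1.0)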
""" - if "mnist" in name: return load_mnist() - elif "cifar10" in name: + if "cifar10" in name: return load_cifar10() - elif "stl10" in name: + if "stl10" in name: return load_stl() + if "iris" in name: + return load_iris() raise NotImplementedError("There is no loader for dataset '{}'.".format(name)) @@ -609,122 +689,43 @@ def clip_and_round(x, clip_values, round_samples): :param x: Sample input with shape as expected by the model. :type x: `np.ndarray` :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed - for features. + for features, or `None` if no clipping should be performed. :type clip_values: `tuple` - :param round_samples: The resolution of the input domain to round the data to, e.g., 1.0, or 1/255. Set to 0 to disable. + :param round_samples: The resolution of the input domain to round the data to, e.g., 1.0, or 1/255. Set to 0 to + disable. :type round_samples: `float` """ if round_samples == 0: return x - x = np.clip(x, *clip_values) + if clip_values is not None: + np.clip(x, clip_values[0], clip_values[1], out=x) x = np.around(x / round_samples) * round_samples return x -# -------------------------------------------------------------------------------------------------- PRE-TRAINED MODELS - - -def _tf_initializer_w_conv2d(_, dtype, partition_info): - """ - Initializer of weights in convolution layer for Tensorflow. - - :return: Tensorflow constant - :rtype: tf.constant - """ - import tensorflow as tf - - w_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_CONV2D.npy')) - return tf.constant(w_conv2d, dtype) - - -def _kr_initializer_w_conv2d(_, dtype=None): - """ - Initializer of weights in convolution layer for Keras. - - :return: Keras variable - :rtype: k.variable - """ - import keras.backend as k - - w_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_CONV2D.npy')) - return k.variable(value=w_conv2d, dtype=dtype) - - -def _tf_initializer_b_conv2d(_, dtype, partition_info): - """ - Initializer of biases in convolution layer for Tensorflow. - - :return: Tensorflow constant - :rtype: tf.constant - """ - import tensorflow as tf +# ----------------------------------------------------------------------------------------------- TEST MODELS FOR MNIST - b_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_CONV2D.npy')) - return tf.constant(b_conv2d, dtype) +def _tf_weights_loader(dataset, weights_type, layer='DENSE'): + filename = str(weights_type) + '_' + str(layer) + '_' + str(dataset) + '.npy' -def _kr_initializer_b_conv2d(_, dtype=None): - """ - Initializer of weights in convolution layer for Keras. - - :return: Keras variable - :rtype: k.variable - """ - import keras.backend as k - - b_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_CONV2D.npy')) - return k.variable(value=b_conv2d, dtype=dtype) - - -def _tf_initializer_w_dense(_, dtype, partition_info): - """ - Initializer of weights in dense layer for Tensorflow. 
- - :return: Tensorflow constant - :rtype: tf.constant - """ - import tensorflow as tf + def _tf_initializer(_, dtype, partition_info): + import tensorflow as tf - w_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE.npy')) - return tf.constant(w_dense, dtype) + weights = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', filename)) + return tf.constant(weights, dtype) + return _tf_initializer -def _kr_initializer_w_dense(_, dtype=None): - """ - Initializer of weights in dense layer for Keras. - :return: Keras varibale - :rtype: k.variable - """ +def _kr_weights_loader(dataset, weights_type, layer='DENSE'): import keras.backend as k + filename = str(weights_type) + '_' + str(layer) + '_' + str(dataset) + '.npy' - w_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE.npy')) - return k.variable(value=w_dense, dtype=dtype) - - -def _tf_initializer_b_dense(_, dtype, partition_info): - """ - Initializer of biases in dense layer for Tensorflow. + def _kr_initializer(_, dtype=None): + weights = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', filename)) + return k.variable(value=weights, dtype=dtype) - :return: Tensorflow constant - :rtype: tf.constant - """ - import tensorflow as tf - - b_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE.npy')) - return tf.constant(b_dense, dtype) - - -def _kr_initializer_b_dense(_, dtype=None): - """ - Initializer of biases in dense layer for Keras. - - :return: Keras variable - :rtype: k.variable - """ - import keras.backend as k - - b_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE.npy')) - return k.variable(value=b_dense, dtype=dtype) + return _kr_initializer def get_classifier_tf(): @@ -747,14 +748,15 @@ def get_classifier_tf(): output_ph = tf.placeholder(tf.int32, shape=[None, 10]) # Define the tensorflow graph - conv = tf.layers.conv2d(input_ph, 1, 7, activation=tf.nn.relu, kernel_initializer=_tf_initializer_w_conv2d, - bias_initializer=_tf_initializer_b_conv2d) + conv = tf.layers.conv2d(input_ph, 1, 7, activation=tf.nn.relu, + kernel_initializer=_tf_weights_loader('MNIST', 'W', 'CONV2D'), + bias_initializer=_tf_weights_loader('MNIST', 'B', 'CONV2D')) conv = tf.layers.max_pooling2d(conv, 4, 4) flattened = tf.contrib.layers.flatten(conv) # Logits layer - logits = tf.layers.dense(flattened, 10, kernel_initializer=_tf_initializer_w_dense, - bias_initializer=_tf_initializer_b_dense) + logits = tf.layers.dense(flattened, 10, kernel_initializer=_tf_weights_loader('MNIST', 'W', 'DENSE'), + bias_initializer=_tf_weights_loader('MNIST', 'B', 'DENSE')) # Train operator loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=output_ph)) @@ -793,17 +795,18 @@ def get_classifier_kr(): # Create simple CNN model = Sequential() model.add(Conv2D(1, kernel_size=(7, 7), activation='relu', input_shape=(28, 28, 1), - kernel_initializer=_kr_initializer_w_conv2d, bias_initializer=_kr_initializer_b_conv2d)) + kernel_initializer=_kr_weights_loader('MNIST', 'W', 'CONV2D'), + bias_initializer=_kr_weights_loader('MNIST', 'B', 'CONV2D'))) model.add(MaxPooling2D(pool_size=(4, 4))) model.add(Flatten()) - model.add(Dense(10, activation='softmax', kernel_initializer=_kr_initializer_w_dense, - bias_initializer=_kr_initializer_b_dense)) + model.add(Dense(10, activation='softmax', kernel_initializer=_kr_weights_loader('MNIST', 'W', 'DENSE'), + 
bias_initializer=_kr_weights_loader('MNIST', 'B', 'DENSE'))) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01), metrics=['accuracy']) # Get classifier - krc = KerasClassifier((0, 1), model, use_logits=False) + krc = KerasClassifier(model, clip_values=(0, 1), use_logits=False) return krc, sess @@ -826,10 +829,10 @@ class Model(nn.Module): def __init__(self): super(Model, self).__init__() - w_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_CONV2D.npy')) - b_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_CONV2D.npy')) - w_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE.npy')) - b_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE.npy')) + w_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_CONV2D_MNIST.npy')) + b_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_CONV2D_MNIST.npy')) + w_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE_MNIST.npy')) + b_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE_MNIST.npy')) self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7) w_conv2d_pt = np.swapaxes(w_conv2d, 0, 2) @@ -858,6 +861,146 @@ def forward(self, x): optimizer = optim.Adam(model.parameters(), lr=0.01) # Get classifier - ptc = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10) + ptc = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(1, 28, 28), nb_classes=10, + clip_values=(0, 1)) + + return ptc + + +# ------------------------------------------------------------------------------------------------ TEST MODELS FOR IRIS + +def get_iris_classifier_tf(): + """ + Standard Tensorflow classifier for unit testing. + + The following hyper-parameters were used to obtain the weights and biases: + - learning_rate: 0.01 + - batch size: 5 + - number of epochs: 200 + - optimizer: tf.train.AdamOptimizer + The model is trained of 70% of the dataset, and 30% of the training set is used as validation split. + + :return: The trained model for Iris dataset and the session. 
+ :rtype: `tuple(TFClassifier, tf.Session)` + """ + import tensorflow as tf + from art.classifiers import TFClassifier + + # Define input and output placeholders + input_ph = tf.placeholder(tf.float32, shape=[None, 4]) + output_ph = tf.placeholder(tf.int32, shape=[None, 3]) + + # Define the tensorflow graph + dense1 = tf.layers.dense(input_ph, 10, kernel_initializer=_tf_weights_loader('IRIS', 'W', 'DENSE1'), + bias_initializer=_tf_weights_loader('IRIS', 'B', 'DENSE1')) + dense2 = tf.layers.dense(dense1, 10, kernel_initializer=_tf_weights_loader('IRIS', 'W', 'DENSE2'), + bias_initializer=_tf_weights_loader('IRIS', 'B', 'DENSE2')) + logits = tf.layers.dense(dense2, 3, kernel_initializer=_tf_weights_loader('IRIS', 'W', 'DENSE3'), + bias_initializer=_tf_weights_loader('IRIS', 'B', 'DENSE3')) + + # Train operator + loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=output_ph)) + + # Tensorflow session and initialization + sess = tf.Session() + sess.run(tf.global_variables_initializer()) + + # Train the classifier + tfc = TFClassifier(clip_values=(0, 1), input_ph=input_ph, logits=logits, output_ph=output_ph, train=None, + loss=loss, learning=None, sess=sess, channel_index=1) + + return tfc, sess + + +def get_iris_classifier_kr(): + """ + Standard Keras classifier for unit testing on Iris dataset. The weights and biases are identical to the Tensorflow + model in `get_iris_classifier_tf`. + + :return: The trained model for Iris dataset and the session. + :rtype: `tuple(KerasClassifier, tf.Session)` + """ + import keras + import keras.backend as k + from keras.models import Sequential + from keras.layers import Dense + import tensorflow as tf + + from art.classifiers import KerasClassifier + + # Initialize a tf session + sess = tf.Session() + k.set_session(sess) + + # Create simple CNN + model = Sequential() + model.add(Dense(10, input_shape=(4,), activation='relu', + kernel_initializer=_kr_weights_loader('IRIS', 'W', 'DENSE1'), + bias_initializer=_kr_weights_loader('IRIS', 'B', 'DENSE1'))) + model.add(Dense(10, activation='relu', kernel_initializer=_kr_weights_loader('IRIS', 'W', 'DENSE2'), + bias_initializer=_kr_weights_loader('IRIS', 'B', 'DENSE2'))) + model.add(Dense(3, activation='softmax', kernel_initializer=_kr_weights_loader('IRIS', 'W', 'DENSE3'), + bias_initializer=_kr_weights_loader('IRIS', 'B', 'DENSE3'))) + model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(lr=0.001), metrics=['accuracy']) + + # Get classifier + krc = KerasClassifier(model, clip_values=(0, 1), use_logits=False, channel_index=1) + + return krc, sess + + +def get_iris_classifier_pt(): + """ + Standard PyTorch classifier for unit testing on Iris dataset. + + :return: Trained model for Iris dataset. + :rtype: :class:`.PyTorchClassifier` + """ + from art.classifiers import PyTorchClassifier + + class Model(nn.Module): + """ + Create Iris model for PyTorch. + + The weights and biases are identical to the Tensorflow model in `get_iris_classifier_tf`. 
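+
+        A rough usage sketch (illustrative only, assuming this patch is applied; it mirrors how the Iris unit tests
+        in this patch exercise the wrapped classifier rather than this class directly)::
+
+            import numpy as np
+            from art.utils import load_dataset, get_iris_classifier_pt
+
+            classifier = get_iris_classifier_pt()   # wraps this 4 -> 10 -> 10 -> 3 dense network
+            (_, _), (x_test, y_test), _, _ = load_dataset('iris')
+            preds = np.argmax(classifier.predict(x_test), axis=1)
+            accuracy = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]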
+ """ + + def __init__(self): + super(Model, self).__init__() + + w_dense1 = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE1_IRIS.npy')) + b_dense1 = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE1_IRIS.npy')) + w_dense2 = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE2_IRIS.npy')) + b_dense2 = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE2_IRIS.npy')) + w_dense3 = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE3_IRIS.npy')) + b_dense3 = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE3_IRIS.npy')) + + self.fully_connected1 = nn.Linear(4, 10) + self.fully_connected1.weight = nn.Parameter(torch.Tensor(np.transpose(w_dense1))) + self.fully_connected1.bias = nn.Parameter(torch.Tensor(b_dense1)) + self.fully_connected2 = nn.Linear(10, 10) + self.fully_connected2.weight = nn.Parameter(torch.Tensor(np.transpose(w_dense2))) + self.fully_connected2.bias = nn.Parameter(torch.Tensor(b_dense2)) + self.fully_connected3 = nn.Linear(10, 3) + self.fully_connected3.weight = nn.Parameter(torch.Tensor(np.transpose(w_dense3))) + self.fully_connected3.bias = nn.Parameter(torch.Tensor(b_dense3)) + + def forward(self, x): + x = self.fully_connected1(x) + x = self.fully_connected2(x) + logit_output = self.fully_connected3(x) + + return logit_output + + # Define the network + model = Model() + + # Define a loss function and optimizer + loss_fn = nn.CrossEntropyLoss() + optimizer = optim.Adam(model.parameters(), lr=0.01) + + # Get classifier + ptc = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(4,), nb_classes=3, + clip_values=(0, 1), channel_index=1) return ptc diff --git a/art/wrappers/expectation.py b/art/wrappers/expectation.py index 12ad30d5db..93df64e515 100644 --- a/art/wrappers/expectation.py +++ b/art/wrappers/expectation.py @@ -71,7 +71,7 @@ def loss_gradient(self, x, y): :param x: Sample input with shape as expected by the model. :type x: `np.ndarray` - :param y: Correct labels, one-vs-rest encoding. + :param y: Correct labels, one-hot encoded. :type y: `np.ndarray` :return: Array of gradients of the same shape as `x`. 
:rtype: `np.ndarray` @@ -102,6 +102,7 @@ def class_gradient(self, x, label=None, logits=False): """ logger.info('Apply Expectation over Transformations.') class_gradient = self.classifier.class_gradient(next(self.transformation())(x), label, logits) - for _ in range(self.sample_size-1): + for _ in range(self.sample_size - 1): class_gradient += self.classifier.class_gradient(next(self.transformation())(x), label, logits) - return class_gradient/self.sample_size + + return class_gradient / self.sample_size diff --git a/models/B_CONV2D.npy b/models/B_CONV2D_MNIST.npy similarity index 100% rename from models/B_CONV2D.npy rename to models/B_CONV2D_MNIST.npy diff --git a/models/B_DENSE1_IRIS.npy b/models/B_DENSE1_IRIS.npy new file mode 100644 index 0000000000..5d2975fa0f Binary files /dev/null and b/models/B_DENSE1_IRIS.npy differ diff --git a/models/B_DENSE2_IRIS.npy b/models/B_DENSE2_IRIS.npy new file mode 100644 index 0000000000..ed175f2467 Binary files /dev/null and b/models/B_DENSE2_IRIS.npy differ diff --git a/models/B_DENSE3_IRIS.npy b/models/B_DENSE3_IRIS.npy new file mode 100644 index 0000000000..79f0d0d08d Binary files /dev/null and b/models/B_DENSE3_IRIS.npy differ diff --git a/models/B_DENSE.npy b/models/B_DENSE_MNIST.npy similarity index 100% rename from models/B_DENSE.npy rename to models/B_DENSE_MNIST.npy diff --git a/models/W_CONV2D.npy b/models/W_CONV2D_MNIST.npy similarity index 100% rename from models/W_CONV2D.npy rename to models/W_CONV2D_MNIST.npy diff --git a/models/W_DENSE1_IRIS.npy b/models/W_DENSE1_IRIS.npy new file mode 100644 index 0000000000..366724fa9a Binary files /dev/null and b/models/W_DENSE1_IRIS.npy differ diff --git a/models/W_DENSE2_IRIS.npy b/models/W_DENSE2_IRIS.npy new file mode 100644 index 0000000000..5e0e83f990 Binary files /dev/null and b/models/W_DENSE2_IRIS.npy differ diff --git a/models/W_DENSE3_IRIS.npy b/models/W_DENSE3_IRIS.npy new file mode 100644 index 0000000000..306ee4a29c Binary files /dev/null and b/models/W_DENSE3_IRIS.npy differ diff --git a/models/W_DENSE.npy b/models/W_DENSE_MNIST.npy similarity index 100% rename from models/W_DENSE.npy rename to models/W_DENSE_MNIST.npy diff --git a/run_tests.sh b/run_tests.sh index 323ab1a307..31902c4e25 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,2 +1,11 @@ #!/usr/bin/env bash -python -m unittest discover +python -m unittest discover tests/attacks -p 'test_*.py' +python -m unittest discover tests/classifiers -p 'test_*.py' +python -m unittest discover tests/defences -p 'test_*.py' +python -m unittest discover tests/detection -p 'test_*.py' +python -m unittest discover tests/poison_detection -p 'test_*.py' +python -m unittest discover tests/wrappers -p 'test_*.py' +python -m unittest tests.test_data_generators +python -m unittest tests.test_metrics +python -m unittest tests.test_utils +python -m unittest tests.test_visualization diff --git a/tests/attacks/test_adversarial_patch.py b/tests/attacks/test_adversarial_patch.py index 3fa1aaf861..61ac711ce9 100644 --- a/tests/attacks/test_adversarial_patch.py +++ b/tests/attacks/test_adversarial_patch.py @@ -117,5 +117,19 @@ def test_ptclassifier(self): self.assertTrue(patch_adv[0, 14, 14] - 19.790434152473054 < 0.01) self.assertTrue(np.sum(patch_adv) - 383.5670772794207 < 0.01) - if __name__ == '__main__': - unittest.main() + def test_failure_feature_vectors(self): + attack_params = {"rotation_max": 22.5, "scale_min": 0.1, "scale_max": 1.0, + "learning_rate": 5.0, "number_of_steps": 5, "patch_shape": (1, 28, 28), "batch_size": 10} + attack = 
AdversarialPatch(classifier=None) + attack.set_params(**attack_params) + data = np.random.rand(10, 4) + + # Assert that value error is raised for feature vectors + with self.assertRaises(ValueError) as context: + attack.generate(data) + + self.assertTrue('Feature vectors detected.' in str(context.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/attacks/test_boundary.py b/tests/attacks/test_boundary.py index d0bac58dd6..d83f336855 100644 --- a/tests/attacks/test_boundary.py +++ b/tests/attacks/test_boundary.py @@ -25,7 +25,9 @@ import tensorflow as tf from art.attacks import BoundaryAttack -from art.utils import load_mnist, random_targets, master_seed, get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.classifiers import KerasClassifier +from art.utils import load_dataset, random_targets, master_seed, get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -40,7 +42,7 @@ class TestBoundary(unittest.TestCase): @classmethod def setUpClass(cls): # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -166,5 +168,88 @@ def test_ptclassifier(self): self.assertTrue((y_pred != y_pred_adv).any()) +class TestBoundaryVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + attack = BoundaryAttack(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with boundary adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = BoundaryAttack(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with boundary adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + # Test untargeted attack + attack = BoundaryAttack(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = 
np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with boundary adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = BoundaryAttack(classifier, targeted=True, max_iter=10) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted boundary on Iris: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + attack = BoundaryAttack(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with boundary adversarial examples: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_carlini.py b/tests/attacks/test_carlini.py index 02fc8f9840..69ed9bc117 100644 --- a/tests/attacks/test_carlini.py +++ b/tests/attacks/test_carlini.py @@ -25,12 +25,13 @@ import tensorflow as tf from art.attacks import CarliniL2Method, CarliniLInfMethod -from art.utils import load_mnist, random_targets, master_seed, get_classifier_tf, get_classifier_kr +from art.classifiers import KerasClassifier +from art.utils import load_dataset, random_targets, master_seed, get_classifier_tf, get_classifier_kr +from art.utils import get_classifier_pt, get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') BATCH_SIZE = 100 -NB_TRAIN = 5000 NB_TEST = 10 @@ -42,8 +43,7 @@ class TestCarliniL2(unittest.TestCase): @classmethod def setUpClass(cls): # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() - x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -156,39 +156,122 @@ def test_krclassifier(self): # Clean-up k.clear_session() - # def test_ptclassifier(self): - # """ - # Third test with the PyTorchClassifier. 
- # :return: - # """ - # # Build PyTorchClassifier - # ptc = get_classifier_pt() - # - # # Get MNIST - # (_, _), (x_test, y_test) = self.mnist - # x_test = np.swapaxes(x_test, 1, 3) - # - # # First attack - # cl2m = CarliniL2Method(classifier=ptc, targeted=True, max_iter=10) - # params = {'y': random_targets(y_test, ptc.nb_classes)} - # x_test_adv = cl2m.generate(x_test, **params) - # self.assertFalse((x_test == x_test_adv).all()) - # self.assertTrue((x_test_adv <= 1.0001).all()) - # self.assertTrue((x_test_adv >= -0.0001).all()) - # target = np.argmax(params['y'], axis=1) - # y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) - # self.assertTrue((target == y_pred_adv).any()) - # logger.info('CW2 Success Rate: %.2f', (sum(target == y_pred_adv) / float(len(target)))) - # - # # Second attack - # cl2m = CarliniL2Method(classifier=ptc, targeted=False, max_iter=10) - # x_test_adv = cl2m.generate(x_test) - # self.assertTrue((x_test_adv <= 1.0001).all()) - # self.assertTrue((x_test_adv >= -0.0001).all()) - # target = np.argmax(params['y'], axis=1) - # y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) - # self.assertTrue((target != y_pred_adv).any()) - # logger.info('CW2 Success Rate: %.2f', (sum(target != y_pred_adv) / float(len(target)))) + def test_ptclassifier(self): + """ + Third test with the PyTorchClassifier. + :return: + """ + # Build PyTorchClassifier + ptc = get_classifier_pt() + + # Get MNIST + (_, _), (x_test, y_test) = self.mnist + x_test = np.swapaxes(x_test, 1, 3) + + # First attack + cl2m = CarliniL2Method(classifier=ptc, targeted=True, max_iter=10) + params = {'y': random_targets(y_test, ptc.nb_classes)} + x_test_adv = cl2m.generate(x_test, **params) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1.0001).all()) + self.assertTrue((x_test_adv >= -0.0001).all()) + target = np.argmax(params['y'], axis=1) + y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) + self.assertTrue((target == y_pred_adv).any()) + logger.info('CW2 Success Rate: %.2f', (sum(target == y_pred_adv) / float(len(target)))) + + # Second attack + cl2m = CarliniL2Method(classifier=ptc, targeted=False, max_iter=10) + x_test_adv = cl2m.generate(x_test) + self.assertTrue((x_test_adv <= 1.0001).all()) + self.assertTrue((x_test_adv >= -0.0001).all()) + target = np.argmax(params['y'], axis=1) + y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) + self.assertTrue((target != y_pred_adv).any()) + logger.info('CW2 Success Rate: %.2f', (sum(target != y_pred_adv) / float(len(target)))) + + +class TestCarliniL2Vectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + attack = CarliniL2Method(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = 
get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = CarliniL2Method(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + # Test untargeted attack + attack = CarliniL2Method(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = CarliniL2Method(classifier, targeted=True, max_iter=10) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted C&W on Iris: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + attack = CarliniL2Method(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100)) class TestCarliniLInf(TestCarliniL2): @@ -300,38 +383,112 @@ def test_krclassifier(self): # Clean-up k.clear_session() - # def test_ptclassifier(self): - # """ - # Third test with the PyTorchClassifier. 
- # :return: - # """ - # # Build PyTorchClassifier - # ptc = get_classifier_pt() - # - # # Get MNIST - # (x_train, y_train), (x_test, y_test) = self.mnist - # x_train = np.swapaxes(x_train, 1, 3) - # x_test = np.swapaxes(x_test, 1, 3) - # - # # First attack - # clinfm = CarliniLInfMethod(classifier=ptc, targeted=True, max_iter=10, eps=0.5) - # params = {'y': random_targets(y_test, ptc.nb_classes)} - # x_test_adv = clinfm.generate(x_test, **params) - # self.assertFalse((x_test == x_test_adv).all()) - # self.assertTrue((x_test_adv <= 1.0001).all()) - # self.assertTrue((x_test_adv >= -0.0001).all()) - # target = np.argmax(params['y'], axis=1) - # y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) - # self.assertTrue((target == y_pred_adv).any()) - # - # # Second attack - # clinfm = CarliniLInfMethod(classifier=ptc, targeted=False, max_iter=10, eps=0.5) - # x_test_adv = clinfm.generate(x_test) - # self.assertTrue((x_test_adv <= 1.0001).all()) - # self.assertTrue((x_test_adv >= -0.0001).all()) - # target = np.argmax(params['y'], axis=1) - # y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) - # self.assertTrue((target != y_pred_adv).any()) + def test_ptclassifier(self): + """ + Third test with the PyTorchClassifier. + :return: + """ + # Build PyTorchClassifier + ptc = get_classifier_pt() + + # Get MNIST + (x_train, y_train), (x_test, y_test) = self.mnist + x_train = np.swapaxes(x_train, 1, 3) + x_test = np.swapaxes(x_test, 1, 3) + + # First attack + clinfm = CarliniLInfMethod(classifier=ptc, targeted=True, max_iter=10, eps=0.5) + params = {'y': random_targets(y_test, ptc.nb_classes)} + x_test_adv = clinfm.generate(x_test, **params) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1.0001).all()) + self.assertTrue((x_test_adv >= -0.0001).all()) + target = np.argmax(params['y'], axis=1) + y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) + self.assertTrue((target == y_pred_adv).any()) + + # Second attack + clinfm = CarliniLInfMethod(classifier=ptc, targeted=False, max_iter=10, eps=0.5) + x_test_adv = clinfm.generate(x_test) + self.assertTrue((x_test_adv <= 1.0001).all()) + self.assertTrue((x_test_adv >= -0.0001).all()) + target = np.argmax(params['y'], axis=1) + y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) + self.assertTrue((target != y_pred_adv).any()) + + +class TestCarliniLInfVectors(TestCarliniL2Vectors): + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + attack = CarliniLInfMethod(classifier, targeted=False, max_iter=10, eps=0.5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = CarliniLInfMethod(classifier, targeted=False, max_iter=10, eps=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + 
self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + # Test untargeted attack + attack = CarliniLInfMethod(classifier, targeted=False, max_iter=10, eps=0.5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = CarliniLInfMethod(classifier, targeted=True, max_iter=10, eps=0.5) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted C&W on Iris: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + attack = CarliniLInfMethod(classifier, targeted=False, max_iter=10, eps=0.5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100)) if __name__ == '__main__': diff --git a/tests/attacks/test_deepfool.py b/tests/attacks/test_deepfool.py index b7bc6b7143..e6bf775ad0 100644 --- a/tests/attacks/test_deepfool.py +++ b/tests/attacks/test_deepfool.py @@ -25,8 +25,10 @@ import tensorflow as tf from art.attacks.deepfool import DeepFool -from art.utils import load_mnist, get_labels_np_array, master_seed +from art.classifiers import KerasClassifier +from art.utils import load_dataset, get_labels_np_array, master_seed from art.utils import get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -41,7 +43,7 @@ def setUpClass(cls): k.set_learning_phase(1) # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -144,5 +146,76 @@ def test_partial_grads(self): logger.info('Accuracy on adversarial test examples: %.2f%%', (acc * 100)) +class TestDeepFoolVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = 
(x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + attack = DeepFool(classifier, max_iter=5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with DeepFool adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = DeepFool(classifier, max_iter=5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with DeepFool adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + attack = DeepFool(classifier, max_iter=5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with DeepFool adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + + attack = DeepFool(classifier, max_iter=5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with DeepFool adversarial examples: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_elastic_net.py b/tests/attacks/test_elastic_net.py index bddfa72744..2ba75e90fe 100644 --- a/tests/attacks/test_elastic_net.py +++ b/tests/attacks/test_elastic_net.py @@ -25,7 +25,9 @@ import tensorflow as tf from art.attacks import ElasticNet -from art.utils import load_mnist, random_targets, master_seed, get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.classifiers import KerasClassifier +from art.utils import load_dataset, random_targets, master_seed, get_classifier_tf, get_classifier_kr +from art.utils import get_classifier_pt, get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -42,7 +44,7 @@ class TestElasticNet(unittest.TestCase): @classmethod def setUpClass(cls): # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = 
load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -95,9 +97,9 @@ def test_tfclassifier(self): self.assertTrue((x_test_adv >= -0.0001).all()) target = np.argmax(params['y'], axis=1) y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1) - logger.debug('EAD Target: %s', target) - logger.debug('EAD Actual: %s', y_pred_adv) - logger.info('EAD Success Rate: %.2f', (sum(target == y_pred_adv) / float(len(target)))) + logger.debug('EAD target: %s', target) + logger.debug('EAD actual: %s', y_pred_adv) + logger.info('EAD success rate on MNIST: %.2f%%', (100 * sum(target == y_pred_adv) / len(target))) self.assertTrue((target == y_pred_adv).any()) # Second attack @@ -108,9 +110,9 @@ def test_tfclassifier(self): self.assertTrue((x_test_adv >= -0.0001).all()) target = np.argmax(params['y'], axis=1) y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1) - logger.debug('EAD Target: %s', target) - logger.debug('EAD Actual: %s', y_pred_adv) - logger.info('EAD Success Rate: %.2f', (sum(target != y_pred_adv) / float(len(target)))) + logger.debug('EAD target: %s', target) + logger.debug('EAD actual: %s', y_pred_adv) + logger.info('EAD success rate on MNIST: %.2f%%', (100 * sum(target != y_pred_adv) / float(len(target)))) self.assertTrue((target != y_pred_adv).any()) # Third attack @@ -122,9 +124,9 @@ def test_tfclassifier(self): self.assertTrue((x_test_adv >= -0.0001).all()) y_pred = np.argmax(tfc.predict(x_test), axis=1) y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1) - logger.debug('EAD Target: %s', y_pred) - logger.debug('EAD Actual: %s', y_pred_adv) - logger.info('EAD Success Rate: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred)))) + logger.debug('EAD target: %s', y_pred) + logger.debug('EAD actual: %s', y_pred_adv) + logger.info('EAD success rate: %.2f%%', (100 * sum(y_pred != y_pred_adv) / float(len(y_pred)))) self.assertTrue((y_pred != y_pred_adv).any()) # First attack without batching @@ -136,9 +138,9 @@ def test_tfclassifier(self): self.assertTrue((x_test_adv >= -0.0001).all()) target = np.argmax(params['y'], axis=1) y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1) - logger.debug('EAD Target: %s', target) - logger.debug('EAD Actual: %s', y_pred_adv) - logger.info('EAD Success Rate: %.2f', (sum(target == y_pred_adv) / float(len(target)))) + logger.debug('EAD target: %s', target) + logger.debug('EAD actual: %s', y_pred_adv) + logger.info('EAD success rate: %.2f%%', (100 * sum(target == y_pred_adv) / float(len(target)))) self.assertTrue((target == y_pred_adv).any()) # Second attack without batching @@ -149,9 +151,9 @@ def test_tfclassifier(self): self.assertTrue((x_test_adv >= -0.0001).all()) target = np.argmax(params['y'], axis=1) y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1) - logger.debug('EAD Target: %s', target) - logger.debug('EAD Actual: %s', y_pred_adv) - logger.info('EAD Success Rate: %.2f', (sum(target != y_pred_adv) / float(len(target)))) + logger.debug('EAD target: %s', target) + logger.debug('EAD actual: %s', y_pred_adv) + logger.info('EAD success rate: %.2f%%', (100 * sum(target != y_pred_adv) / float(len(target)))) self.assertTrue((target != y_pred_adv).any()) # Kill TF @@ -178,9 +180,9 @@ def test_krclassifier(self): self.assertTrue((x_test_adv >= -0.0001).all()) target = np.argmax(params['y'], axis=1) y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1) - 
logger.debug('EAD Target: %s', target) - logger.debug('EAD Actual: %s', y_pred_adv) - logger.info('EAD Success Rate: %.2f', (sum(target == y_pred_adv) / float(len(target)))) + logger.debug('EAD target: %s', target) + logger.debug('EAD actual: %s', y_pred_adv) + logger.info('EAD success rate: %.2f%%', (100 * sum(target == y_pred_adv) / float(len(target)))) + self.assertTrue((target == y_pred_adv).any()) # Second attack @@ -191,9 +193,9 @@ def test_krclassifier(self): self.assertTrue((x_test_adv >= -0.0001).all()) target = np.argmax(params['y'], axis=1) y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1) - logger.debug('EAD Target: %s', target) - logger.debug('EAD Actual: %s', y_pred_adv) - logger.info('EAD Success Rate: %.2f', (sum(target != y_pred_adv) / float(len(target)))) + logger.debug('EAD target: %s', target) + logger.debug('EAD actual: %s', y_pred_adv) + logger.info('EAD success rate: %.2f%%', (100 * sum(target != y_pred_adv) / float(len(target)))) + self.assertTrue((target != y_pred_adv).any()) # Kill Keras @@ -233,5 +235,88 @@ def test_ptclassifier(self): self.assertTrue((target != y_pred_adv).any()) +class TestElasticNetVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + attack = ElasticNet(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = 1. - np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('EAD success rate on Iris: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = ElasticNet(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = 1. - np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('EAD success rate on Iris: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + # Test untargeted attack + attack = ElasticNet(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = 1.
- np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('EAD success rate on Iris: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = ElasticNet(classifier, targeted=True, max_iter=10) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Targeted EAD success rate on Iris: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + attack = ElasticNet(classifier, targeted=False, max_iter=10) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = 1. - np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('EAD success rate on Iris: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_fast_gradient.py b/tests/attacks/test_fast_gradient.py index f7e089c01e..3e2d04e338 100644 --- a/tests/attacks/test_fast_gradient.py +++ b/tests/attacks/test_fast_gradient.py @@ -25,8 +25,9 @@ from art.attacks.fast_gradient import FastGradientMethod from art.classifiers import KerasClassifier -from art.utils import load_mnist, get_labels_np_array, master_seed -from art.utils import get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import load_dataset, get_labels_np_array, master_seed +from art.utils import get_classifier_tf, get_classifier_kr, get_classifier_pt, random_targets +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -35,13 +36,13 @@ NB_TEST = 11 -class TestFastGradientMethod(unittest.TestCase): +class TestFastGradientMethodImages(unittest.TestCase): @classmethod def setUpClass(cls): k.set_learning_phase(1) # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -118,10 +119,10 @@ def _test_backend_mnist(self, classifier): self.assertFalse((y_test == test_y_pred).all()) acc = np.sum(np.argmax(train_y_pred, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0] - logger.info('Accuracy on adversarial train examples: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial train examples: %.2f%%', (acc * 100)) acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0] - logger.info('Accuracy on adversarial test examples: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial test examples: %.2f%%', (acc * 100)) # Test minimal perturbations attack_params = {"minimal": True, "eps_step": 0.1, "eps": 1.0} @@ -143,10 +144,12 @@ def _test_backend_mnist(self, classifier): self.assertFalse((y_test == test_y_pred).all()) acc = np.sum(np.argmax(train_y_pred, 
axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0] - logger.info('Accuracy on adversarial train examples with minimal perturbation: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial train examples with minimal perturbation: %.2f%%', + (acc * 100)) acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0] - logger.info('Accuracy on adversarial test examples with minimal perturbation: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial test examples with minimal perturbation: %.2f%%', + (acc * 100)) # L_1 norm attack = FastGradientMethod(classifier, eps=1, norm=1) @@ -156,7 +159,7 @@ def _test_backend_mnist(self, classifier): test_y_pred = get_labels_np_array(classifier.predict(x_test_adv)) self.assertFalse((y_test == test_y_pred).all()) acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0] - logger.info('Accuracy on adversarial test examples with L1 norm: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial test examples with L1 norm: %.2f%%', (acc * 100)) # L_2 norm attack = FastGradientMethod(classifier, eps=1, norm=2) @@ -166,7 +169,7 @@ def _test_backend_mnist(self, classifier): test_y_pred = get_labels_np_array(classifier.predict(x_test_adv)) self.assertFalse((y_test == test_y_pred).all()) acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0] - logger.info('Accuracy on adversarial test examples with L2 norm: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial test examples with L2 norm: %.2f%%', (acc * 100)) # Test random initialisations attack = FastGradientMethod(classifier, num_random_init=3) @@ -176,7 +179,8 @@ def _test_backend_mnist(self, classifier): test_y_pred = get_labels_np_array(classifier.predict(x_test_adv)) self.assertFalse((y_test == test_y_pred).all()) acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0] - logger.info('Accuracy on adversarial test examples with 3 random initialisations: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial test examples with 3 random initialisations: %.2f%%', + (acc * 100)) def test_with_defences(self): self._test_with_defences(custom_activation=False) @@ -191,7 +195,7 @@ def _test_with_defences(self, custom_activation=False): # Get the ready-trained Keras model model = self.classifier_k._model fs = FeatureSqueezing(bit_depth=1, clip_values=(0, 1)) - classifier = KerasClassifier((0, 1), model, defences=fs, custom_activation=custom_activation) + classifier = KerasClassifier(model=model, clip_values=(0, 1), defences=fs, custom_activation=custom_activation) attack = FastGradientMethod(classifier, eps=1) x_train_adv = attack.generate(x_train) @@ -208,11 +212,11 @@ def _test_with_defences(self, custom_activation=False): preds = classifier.predict(x_train_adv) acc = np.sum(np.argmax(preds, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0] - logger.info('Accuracy on adversarial train examples with feature squeezing: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial train examples with feature squeezing: %.2f%%', (acc * 100)) preds = classifier.predict(x_test_adv) acc = np.sum(np.argmax(preds, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0] - logger.info('Accuracy on adversarial test examples: %.2f%%', (acc * 100)) + logger.info('Accuracy on MNIST with FGM adversarial test examples: %.2f%%', (acc * 100)) def 
_test_mnist_targeted(self, classifier): # Get MNIST @@ -251,5 +255,120 @@ def test_mnist_targeted(self): self._swap_axes() +class TestFastGradientVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Test untargeted attack + attack = FastGradientMethod(classifier, eps=.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with FGM adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = FastGradientMethod(classifier, targeted=True, eps=.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted FGM on Iris: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = FastGradientMethod(classifier, eps=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv > 1).any()) + self.assertTrue((x_test_adv < 0).any()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with FGM adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + # Test untargeted attack + attack = FastGradientMethod(classifier, eps=.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with FGM adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = FastGradientMethod(classifier, targeted=True, eps=.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + 
acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted FGM on Iris: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + + # Test untargeted attack + attack = FastGradientMethod(classifier, eps=.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with FGM adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = FastGradientMethod(classifier, targeted=True, eps=.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted FGM on Iris: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_iterative_method.py b/tests/attacks/test_iterative_method.py index 0f43e27672..740f114d2a 100644 --- a/tests/attacks/test_iterative_method.py +++ b/tests/attacks/test_iterative_method.py @@ -7,8 +7,10 @@ import numpy as np from art.attacks.iterative_method import BasicIterativeMethod -from art.utils import load_mnist, get_labels_np_array, master_seed +from art.classifiers import KerasClassifier +from art.utils import load_dataset, get_labels_np_array, master_seed, random_targets from art.utils import get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -23,7 +25,7 @@ def setUpClass(cls): k.set_learning_phase(1) # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -120,10 +122,10 @@ def _test_backend_mnist(self, classifier): self.assertFalse((y_test == test_y_pred).all()) acc = np.sum(np.argmax(train_y_pred, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0] - logger.info('Accuracy on adversarial train examples with 3 random initialisations: %.2f%%', (acc * 100)) + logger.info('Accuracy on adversarial train examples with 3 random initializations: %.2f%%', (acc * 100)) acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0] - logger.info('Accuracy on adversarial test examples with 3 random initialisations: %.2f%%', (acc * 100)) + logger.info('Accuracy on adversarial test examples with 3 random initializations: %.2f%%', (acc * 100)) def _test_mnist_targeted(self, classifier): # Get MNIST @@ -160,5 +162,120 @@ def test_mnist_targeted(self): self._swap_axes() +class TestIterativeAttackVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ 
= load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Test untargeted attack + attack = BasicIterativeMethod(classifier, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with BIM adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = BasicIterativeMethod(classifier, targeted=True, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted BIM on Iris: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = BasicIterativeMethod(classifier, eps=1, eps_step=0.2) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv > 1).any()) + self.assertTrue((x_test_adv < 0).any()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with BIM adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + # Test untargeted attack + attack = BasicIterativeMethod(classifier, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with BIM adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = BasicIterativeMethod(classifier, targeted=True, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted BIM on Iris: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, 
y_test) = self.iris + classifier = get_iris_classifier_pt() + + # Test untargeted attack + attack = BasicIterativeMethod(classifier, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with BIM adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = BasicIterativeMethod(classifier, targeted=True, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted BIM on Iris: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_newtonfool.py b/tests/attacks/test_newtonfool.py index bf17d55374..0154605882 100644 --- a/tests/attacks/test_newtonfool.py +++ b/tests/attacks/test_newtonfool.py @@ -23,7 +23,9 @@ import numpy as np from art.attacks.newtonfool import NewtonFool -from art.utils import load_mnist, master_seed, get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.classifiers import KerasClassifier +from art.utils import load_dataset, master_seed, get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -40,7 +42,7 @@ class TestNewtonFool(unittest.TestCase): @classmethod def setUpClass(cls): # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -123,5 +125,76 @@ def test_ptclassifier(self): self.assertTrue((y_pred_max >= y_pred_adv_max).all()) +class TestNewtonFoolVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + attack = NewtonFool(classifier, max_iter=5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with NewtonFool adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without
clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = NewtonFool(classifier, max_iter=5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with NewtonFool adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + attack = NewtonFool(classifier, max_iter=5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with NewtonFool adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + + attack = NewtonFool(classifier, max_iter=5) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with NewtonFool adversarial examples: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_projected_gradient_descent.py b/tests/attacks/test_projected_gradient_descent.py index 68ce08cf92..9fbd2b75e3 100644 --- a/tests/attacks/test_projected_gradient_descent.py +++ b/tests/attacks/test_projected_gradient_descent.py @@ -24,8 +24,10 @@ import numpy as np from art.attacks.projected_gradient_descent import ProjectedGradientDescent -from art.utils import load_mnist, get_labels_np_array, master_seed +from art.classifiers import KerasClassifier +from art.utils import load_dataset, get_labels_np_array, master_seed, random_targets from art.utils import get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -40,7 +42,7 @@ def setUpClass(cls): k.set_learning_phase(1) # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -143,6 +145,120 @@ def _test_backend_mnist(self, classifier): logger.info('Accuracy on adversarial test examples with 3 random initialisations: %.2f%%', acc * 100) +class TestPGDVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Test untargeted attack + 
attack = ProjectedGradientDescent(classifier, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with PGD adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = ProjectedGradientDescent(classifier, targeted=True, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted PGD on Iris: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = ProjectedGradientDescent(classifier, eps=1, eps_step=0.2) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv > 1).any()) + self.assertTrue((x_test_adv < 0).any()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with PGD adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + # Test untargeted attack + attack = ProjectedGradientDescent(classifier, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with PGD adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = ProjectedGradientDescent(classifier, targeted=True, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted PGD on Iris: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + + # Test untargeted attack + attack = ProjectedGradientDescent(classifier, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == 
x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with PGD adversarial examples: %.2f%%', (acc * 100)) + + # Test targeted attack + targets = random_targets(y_test, nb_classes=3) + attack = ProjectedGradientDescent(classifier, targeted=True, eps=1, eps_step=0.1) + x_test_adv = attack.generate(x_test, **{'y': targets}) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any()) + acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / y_test.shape[0] + logger.info('Success rate of targeted PGD on Iris: %.2f%%', (acc * 100)) + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_saliency_map.py b/tests/attacks/test_saliency_map.py index aff01764c9..812d725b10 100644 --- a/tests/attacks/test_saliency_map.py +++ b/tests/attacks/test_saliency_map.py @@ -25,13 +25,13 @@ import tensorflow as tf from art.attacks.saliency_map import SaliencyMapMethod -from art.utils import load_mnist, get_labels_np_array, to_categorical, master_seed +from art.classifiers import KerasClassifier +from art.utils import load_dataset, get_labels_np_array, to_categorical, master_seed from art.utils import get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') -# TODO add test with gamma < 1 - BATCH_SIZE = 10 NB_TRAIN = 100 NB_TEST = 2 @@ -43,7 +43,7 @@ def setUpClass(cls): k.set_learning_phase(1) # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -148,5 +148,77 @@ def _test_mnist_targeted(self, classifier): logger.info('Accuracy on adversarial examples: %.2f%%', (acc * 100)) +class TestSaliencyMapVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + attack = SaliencyMapMethod(classifier, theta=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with JSMA adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = 
SaliencyMapMethod(classifier, theta=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with JSMA adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + attack = SaliencyMapMethod(classifier, theta=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with JSMA adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + + attack = SaliencyMapMethod(classifier, theta=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with JSMA adversarial examples: %.2f%%', (acc * 100)) + + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_spatial_transformation.py b/tests/attacks/test_spatial_transformation.py index 0975020c23..1140c7f5bf 100644 --- a/tests/attacks/test_spatial_transformation.py +++ b/tests/attacks/test_spatial_transformation.py @@ -140,6 +140,18 @@ def test_ptclassifier(self): self.assertTrue(abs(x_test_adv[0, 0, 14, 14] - 0.008591662) <= 0.01) + def test_failure_feature_vectors(self): + attack_params = {"max_translation": 10.0, "num_translations": 3, "max_rotation": 30.0, "num_rotations": 3} + attack = SpatialTransformation(classifier=None) + attack.set_params(**attack_params) + data = np.random.rand(10, 4) + + # Assert that value error is raised for feature vectors + with self.assertRaises(ValueError) as context: + attack.generate(data) + + self.assertTrue('Feature vectors detected.' 
in str(context.exception)) + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_universal_perturbation.py b/tests/attacks/test_universal_perturbation.py index 74e47386ef..2ebe500d50 100644 --- a/tests/attacks/test_universal_perturbation.py +++ b/tests/attacks/test_universal_perturbation.py @@ -23,7 +23,9 @@ import numpy as np from art.attacks.universal_perturbation import UniversalPerturbation -from art.utils import load_mnist, master_seed, get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.classifiers import KerasClassifier +from art.utils import load_dataset, master_seed, get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -40,7 +42,7 @@ class TestUniversalPerturbation(unittest.TestCase): @classmethod def setUpClass(cls): # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -65,7 +67,7 @@ def test_tfclassifier(self): x_train_adv = up.generate(x_train) self.assertTrue((up.fooling_rate >= 0.2) or not up.converged) - x_test_adv = x_test + up.v + x_test_adv = x_test + up.noise self.assertFalse((x_test == x_test_adv).all()) train_y_pred = np.argmax(tfc.predict(x_train_adv), axis=1) @@ -89,7 +91,7 @@ def test_krclassifier(self): x_train_adv = up.generate(x_train) self.assertTrue((up.fooling_rate >= 0.2) or not up.converged) - x_test_adv = x_test + up.v + x_test_adv = x_test + up.noise self.assertFalse((x_test == x_test_adv).all()) train_y_pred = np.argmax(krc.predict(x_train_adv), axis=1) @@ -115,7 +117,7 @@ def test_ptclassifier(self): x_train_adv = up.generate(x_train) self.assertTrue((up.fooling_rate >= 0.2) or not up.converged) - x_test_adv = x_test + up.v + x_test_adv = x_test + up.noise self.assertFalse((x_test == x_test_adv).all()) train_y_pred = np.argmax(ptc.predict(x_train_adv), axis=1) @@ -124,5 +126,86 @@ def test_ptclassifier(self): self.assertFalse((np.argmax(y_train, axis=1) == train_y_pred).all()) +class TestUniversalPerturbationVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Test untargeted attack + attack_params = {"max_iter": 1, "attacker": "newtonfool", "attacker_params": {"max_iter": 5}} + attack = UniversalPerturbation(classifier) + attack.set_params(**attack_params) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = 
KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack_params = {"max_iter": 1, "attacker": "newtonfool", "attacker_params": {"max_iter": 5}} + attack = UniversalPerturbation(classifier) + attack.set_params(**attack_params) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + # Test untargeted attack + attack_params = {"max_iter": 1, "attacker": "ead", "attacker_params": {"max_iter": 5, "targeted": False}} + attack = UniversalPerturbation(classifier) + attack.set_params(**attack_params) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + + attack_params = {"max_iter": 1, "attacker": "ead", "attacker_params": {"max_iter": 5, "targeted": False}} + attack = UniversalPerturbation(classifier) + attack.set_params(**attack_params) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_virtual_adversarial.py b/tests/attacks/test_virtual_adversarial.py index d5647d3222..792b6275bb 100644 --- a/tests/attacks/test_virtual_adversarial.py +++ b/tests/attacks/test_virtual_adversarial.py @@ -25,8 +25,10 @@ import tensorflow as tf from art.attacks.virtual_adversarial import VirtualAdversarialMethod -from art.utils import load_mnist, get_labels_np_array, master_seed +from art.classifiers import KerasClassifier +from art.utils import load_dataset, get_labels_np_array, master_seed, random_targets from art.utils import get_classifier_tf, get_classifier_kr, get_classifier_pt +from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -41,7 +43,7 @@ def setUpClass(cls): k.set_learning_phase(1) # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -120,5 +122,79 @@ def _test_backend_mnist(self, classifier): logging.info('Accuracy on adversarial examples: %.2f%%', (acc * 100)) +class 
TestVirtualAdversarialVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + master_seed(1234) + + def test_iris_k_clipped(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Test untargeted attack + attack = VirtualAdversarialMethod(classifier, eps=.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_k_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + attack = VirtualAdversarialMethod(classifier, eps=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv > 1).any()) + self.assertTrue((x_test_adv < 0).any()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_tf(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_tf() + + attack = VirtualAdversarialMethod(classifier, eps=.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100)) + + def test_iris_pt(self): + (_, _), (x_test, y_test) = self.iris + classifier = get_iris_classifier_pt() + + attack = VirtualAdversarialMethod(classifier, eps=.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/attacks/test_zoo.py b/tests/attacks/test_zoo.py index a4793fa4ad..03b2d39d65 100644 --- a/tests/attacks/test_zoo.py +++ b/tests/attacks/test_zoo.py @@ -26,7 +26,7 @@ from art.attacks.zoo import ZooAttack from art.utils import get_classifier_kr, get_classifier_pt, get_classifier_tf -from art.utils import load_mnist, random_targets, master_seed +from art.utils import load_dataset, random_targets, master_seed, get_iris_classifier_pt logger = logging.getLogger('testLogger') @@ -41,7 +41,7 @@ class 
TestZooAttack(unittest.TestCase): @classmethod def setUpClass(cls): # Get MNIST - (_, _), (x_test, y_test), _, _ = load_mnist() + (_, _), (x_test, y_test), _, _ = load_dataset('mnist') x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = x_test, y_test @@ -93,7 +93,7 @@ def test_tfclassifier(self): y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1) logger.debug('ZOO target: %s', target) logger.debug('ZOO actual: %s', y_pred_adv) - logger.info('ZOO success rate: %.2f', (sum(target == y_pred_adv) / float(len(target)))) + logger.info('ZOO success rate on MNIST: %.2f', (sum(target == y_pred_adv) / float(len(target)))) # Untargeted attack zoo = ZooAttack(classifier=tfc, targeted=False) @@ -104,7 +104,7 @@ def test_tfclassifier(self): y_pred = np.argmax(tfc.predict(x_test), axis=1) y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1) logger.debug('ZOO actual: %s', y_pred_adv) - logger.info('ZOO success rate: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred)))) + logger.info('ZOO success rate on MNIST: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred)))) # Clean-up session sess.close() @@ -121,7 +121,7 @@ def test_krclassifier(self): # Get MNIST and test with 3 channels x_test, y_test = self.mnist - # First attack + # Targeted attack zoo = ZooAttack(classifier=krc, targeted=True, batch_size=5) params = {'y': random_targets(y_test, krc.nb_classes)} x_test_adv = zoo.generate(x_test, **params) @@ -132,18 +132,18 @@ def test_krclassifier(self): y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1) logger.debug('ZOO target: %s', target) logger.debug('ZOO actual: %s', y_pred_adv) - logger.info('ZOO success rate: %.2f', (sum(target == y_pred_adv) / float(len(target)))) + logger.info('ZOO success rate on MNIST: %.2f', (sum(target == y_pred_adv) / float(len(target)))) - # Second attack + # Untargeted attack zoo = ZooAttack(classifier=krc, targeted=False, max_iter=20) x_test_adv = zoo.generate(x_test) - self.assertFalse((x_test == x_test_adv).all()) + # self.assertFalse((x_test == x_test_adv).all()) self.assertTrue((x_test_adv <= 1.0001).all()) self.assertTrue((x_test_adv >= -0.0001).all()) y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1) y_pred = np.argmax(krc.predict(x_test), axis=1) logger.debug('ZOO actual: %s', y_pred_adv) - logger.info('ZOO success rate: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred)))) + logger.info('ZOO success rate on MNIST: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred)))) # Clean-up k.clear_session() @@ -171,7 +171,7 @@ def test_ptclassifier(self): y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) logger.debug('ZOO target: %s', target) logger.debug('ZOO actual: %s', y_pred_adv) - logger.info('ZOO success rate: %.2f', (sum(target != y_pred_adv) / float(len(target)))) + logger.info('ZOO success rate on MNIST: %.2f', (sum(target != y_pred_adv) / float(len(target)))) # Second attack zoo = ZooAttack(classifier=ptc, targeted=False, max_iter=10) @@ -181,7 +181,21 @@ def test_ptclassifier(self): y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1) y_pred = np.argmax(ptc.predict(x_test), axis=1) logger.debug('ZOO actual: %s', y_pred_adv) - logger.info('ZOO success rate: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred)))) + logger.info('ZOO success rate on MNIST: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred)))) + + def test_failure_feature_vectors(self): + attack_params = {"rotation_max": 22.5, "scale_min": 0.1, "scale_max": 1.0, + "learning_rate": 5.0, "number_of_steps": 5, "patch_shape": (1, 28, 28), "batch_size": 
10} + classifier = get_iris_classifier_pt() + data = np.random.rand(10, 4) + + # Assert that value error is raised for feature vectors + with self.assertRaises(ValueError) as context: + attack = ZooAttack(classifier=classifier) + attack.set_params(**attack_params) + attack.generate(data) + + self.assertTrue('Feature vectors detected.' in str(context.exception)) if __name__ == '__main__': diff --git a/tests/classifiers/test_classifier.py b/tests/classifiers/test_classifier.py index 99ed56c7a5..25871df1e6 100644 --- a/tests/classifiers/test_classifier.py +++ b/tests/classifiers/test_classifier.py @@ -57,5 +57,5 @@ def test_repr(self): repr_ = repr(classifier) self.assertTrue('ClassifierInstance' in repr_) - self.assertTrue('clip_values=(0, 1)' in repr_) - self.assertTrue('channel_index=1, defences=None, preprocessing=(0, 1)' in repr_) + self.assertTrue('channel_index=1, clip_values=(0, 1)' in repr_) + self.assertTrue('defences=None, preprocessing=(0, 1)' in repr_) diff --git a/tests/classifiers/test_detector_classifier.py b/tests/classifiers/test_detector_classifier.py index 3f263347f5..eefc6e3c8e 100644 --- a/tests/classifiers/test_detector_classifier.py +++ b/tests/classifiers/test_detector_classifier.py @@ -61,7 +61,8 @@ def setUpClass(cls): model = Model() loss_fn = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.01) - classifier = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10) + classifier = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(1, 28, 28), + nb_classes=10, clip_values=(0, 1)) classifier.fit(x_train, y_train, batch_size=100, nb_epochs=2) # Define the internal detector @@ -72,7 +73,8 @@ def setUpClass(cls): model = nn.Sequential(conv, nn.ReLU(), nn.MaxPool2d(2, 2), Flatten(), linear) loss_fn = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.01) - detector = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 1) + detector = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(1, 28, 28), + nb_classes=1, clip_values=(0, 1)) # Define the detector-classifier cls.detector_classifier = DetectorClassifier(classifier=classifier, detector=detector) diff --git a/tests/classifiers/test_ensemble.py b/tests/classifiers/test_ensemble.py index 87ee574ca7..39a6557205 100644 --- a/tests/classifiers/test_ensemble.py +++ b/tests/classifiers/test_ensemble.py @@ -47,9 +47,9 @@ def setUpClass(cls): x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = ((x_train, y_train), (x_test, y_test)) - model_1 = KerasClassifier((0, 1), cls._get_model(epochs=2)) - model_2 = KerasClassifier((0, 1), cls._get_model(epochs=2)) - cls.ensemble = EnsembleClassifier((0, 1), [model_1, model_2]) + model_1 = KerasClassifier(model=cls._get_model(epochs=2), clip_values=(0, 1)) + model_2 = KerasClassifier(model=cls._get_model(epochs=2), clip_values=(0, 1)) + cls.ensemble = EnsembleClassifier(classifiers=[model_1, model_2], clip_values=(0, 1)) @classmethod def tearDownClass(cls): @@ -126,6 +126,5 @@ def test_class_gradient(self): def test_repr(self): repr_ = repr(self.ensemble) self.assertTrue('art.classifiers.ensemble.EnsembleClassifier' in repr_) - self.assertTrue('clip_values=(0, 1)' in repr_) self.assertTrue('classifier_weights=array([0.5, 0.5])' in repr_) - self.assertTrue('channel_index=3, defences=None, preprocessing=(0, 1)' in repr_) + self.assertTrue('channel_index=3, clip_values=(0, 1), defences=None, 
preprocessing=(0, 1)' in repr_) diff --git a/tests/classifiers/test_keras.py b/tests/classifiers/test_keras.py index ed27425912..8ffd999dc8 100644 --- a/tests/classifiers/test_keras.py +++ b/tests/classifiers/test_keras.py @@ -235,11 +235,11 @@ def test_functional_model(self): def _test_functional_model(self, custom_activation=True): # Need to update the functional_model code to produce a model with more than one input and output layers... - keras_model = KerasClassifier((0, 1), self.functional_model, input_layer=1, output_layer=1, + keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=1, output_layer=1, custom_activation=custom_activation) self.assertTrue(keras_model._input.name, "input1") self.assertTrue(keras_model._output.name, "output1") - keras_model = KerasClassifier((0, 1), self.functional_model, input_layer=0, output_layer=0, + keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=0, output_layer=0, custom_activation=custom_activation) self.assertTrue(keras_model._input.name, "input0") self.assertTrue(keras_model._output.name, "output0") @@ -266,7 +266,7 @@ def test_resnet(self): keras.backend.set_learning_phase(0) model = ResNet50(weights='imagenet') - classifier = KerasClassifier((0, 255), model) + classifier = KerasClassifier(model, clip_values=(0, 255)) # Load image from file image = img_to_array(load_img(os.path.join(self.test_dir, 'test.jpg'), target_size=(224, 224))) @@ -307,7 +307,8 @@ def test_pickle(self): import pickle fs = FeatureSqueezing(bit_depth=1, clip_values=(0, 1)) - keras_model = KerasClassifier((0, 1), self.functional_model, input_layer=1, output_layer=1, defences=fs) + keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=1, output_layer=1, + defences=fs) with open(full_path, 'wb') as save_file: pickle.dump(keras_model, save_file) @@ -327,6 +328,6 @@ def test_pickle(self): def test_repr(self): repr_ = repr(self.model_mnist) self.assertTrue('art.classifiers.keras.KerasClassifier' in repr_) - self.assertTrue('clip_values=(0, 1)' in repr_) - self.assertTrue('use_logits=False, channel_index=3, defences=None, preprocessing=(0, 1)' in repr_) + self.assertTrue('use_logits=False, channel_index=3' in repr_) + self.assertTrue('clip_values=(0, 1), defences=None, preprocessing=(0, 1)' in repr_) self.assertTrue('input_layer=0, output_layer=0' in repr_) diff --git a/tests/classifiers/test_mxnet.py b/tests/classifiers/test_mxnet.py index 78f0b17e45..ea757fb30e 100644 --- a/tests/classifiers/test_mxnet.py +++ b/tests/classifiers/test_mxnet.py @@ -46,7 +46,8 @@ def setUpClass(cls): trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}) # Fit classifier - classifier = MXClassifier((0, 1), net, (1, 28, 28), 10, trainer) + classifier = MXClassifier(model=net, clip_values=(0, 1), input_shape=(1, 28, 28), nb_classes=10, + optimizer=trainer) classifier.fit(x_train, y_train, batch_size=128, nb_epochs=2) cls.classifier = classifier @@ -54,7 +55,7 @@ def setUp(self): # Set master seed master_seed(1234) - def test_fit_predict(self): + def test_predict(self): (_, _), (x_test, y_test) = self.mnist preds = self.classifier.predict(x_test) @@ -127,8 +128,8 @@ def test_preprocessing(self): (_, _), (x_test, _) = self.mnist # Create classifier - classifier_preproc = MXClassifier((0, 1), self.classifier._model, (1, 28, 28), 10, self.classifier._optimizer, - preprocessing=(1, 2)) + classifier_preproc = MXClassifier(model=self.classifier._model, clip_values=(0, 1), input_shape=(1, 
28, 28), + nb_classes=10, optimizer=self.classifier._optimizer, preprocessing=(1, 2)) preds = self.classifier.predict((x_test - 1.) / 2) preds_preproc = classifier_preproc.predict(x_test) @@ -177,9 +178,9 @@ def test_save(self): def test_repr(self): repr_ = repr(self.classifier) self.assertTrue('art.classifiers.mxnet.MXClassifier' in repr_) - self.assertTrue('clip_values=(0, 1)' in repr_) self.assertTrue('input_shape=(1, 28, 28), nb_classes=10' in repr_) - self.assertTrue('channel_index=1, defences=None, preprocessing=(0, 1)' in repr_) + self.assertTrue('channel_index=1, clip_values=(0, 1)' in repr_) + self.assertTrue('defences=None, preprocessing=(0, 1)' in repr_) if __name__ == '__main__': diff --git a/tests/classifiers/test_pytorch.py b/tests/classifiers/test_pytorch.py index 029429fc51..87849be4e2 100644 --- a/tests/classifiers/test_pytorch.py +++ b/tests/classifiers/test_pytorch.py @@ -61,7 +61,8 @@ def setUpClass(cls): # Define a loss function and optimizer loss_fn = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.01) - classifier = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10) + classifier = PyTorchClassifier(clip_values=(0, 1), model=model, loss=loss_fn, optimizer=optimizer, + input_shape=(1, 28, 28), nb_classes=10) classifier.fit(x_train, y_train, batch_size=100, nb_epochs=2) cls.seq_classifier = classifier @@ -69,7 +70,8 @@ def setUpClass(cls): model = Model() loss_fn = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.01) - classifier2 = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10) + classifier2 = PyTorchClassifier(clip_values=(0, 1), model=model, loss=loss_fn, optimizer=optimizer, + input_shape=(1, 28, 28), nb_classes=10) classifier2.fit(x_train, y_train, batch_size=100, nb_epochs=2) cls.module_classifier = classifier2 @@ -218,8 +220,8 @@ def test_save(self): def test_repr(self): repr_ = repr(self.module_classifier) self.assertTrue('art.classifiers.pytorch.PyTorchClassifier' in repr_) - self.assertTrue('clip_values=(0, 1)' in repr_) self.assertTrue('input_shape=(1, 28, 28), nb_classes=10, channel_index=1' in repr_) + self.assertTrue('clip_values=(0, 1)' in repr_) self.assertTrue('defences=None, preprocessing=(0, 1)' in repr_) if __name__ == '__main__': diff --git a/tests/classifiers/test_tensorflow.py b/tests/classifiers/test_tensorflow.py index f24cf20ab1..b50e63a18b 100644 --- a/tests/classifiers/test_tensorflow.py +++ b/tests/classifiers/test_tensorflow.py @@ -191,8 +191,8 @@ def test_set_learning(self): def test_repr(self): repr_ = repr(self.classifier) self.assertTrue('art.classifiers.tensorflow.TFClassifier' in repr_) - self.assertTrue('clip_values=(0, 1)' in repr_) - self.assertTrue('channel_index=3, defences=None, preprocessing=(0, 1)' in repr_) + self.assertTrue('channel_index=3, clip_values=(0, 1)' in repr_) + self.assertTrue('defences=None, preprocessing=(0, 1)' in repr_) if __name__ == '__main__': diff --git a/tests/defences/test_adversarial_trainer.py b/tests/defences/test_adversarial_trainer.py index a586f3575a..6312c4073f 100644 --- a/tests/defences/test_adversarial_trainer.py +++ b/tests/defences/test_adversarial_trainer.py @@ -105,8 +105,8 @@ def _cnn_mnist_tf(input_shape): TestBase.sess = tf.Session() TestBase.sess.run(tf.global_variables_initializer()) - classifier = TFClassifier((0, 1), inputs_tf, logits, loss=loss, train=train_tf, output_ph=labels_tf, - sess=TestBase.sess) + classifier = TFClassifier(input_ph=inputs_tf, logits=logits, loss=loss, train=train_tf, 
output_ph=labels_tf, + sess=TestBase.sess, clip_values=(0, 1)) return classifier @staticmethod @@ -121,7 +121,7 @@ def _cnn_mnist_k(input_shape): model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01), metrics=['accuracy']) - classifier = KerasClassifier((0, 1), model, use_logits=False) + classifier = KerasClassifier(model=model, clip_values=(0, 1), use_logits=False) return classifier @@ -205,7 +205,7 @@ def __init__(self, x, y, size, batch_size): def get_batch(self): ids = np.random.choice(self.size, size=min(self.size, self.batch_size), replace=False) - return (self.x[ids], self.y[ids]) + return self.x[ids], self.y[ids] generator = MyDataGenerator(x_train, y_train, x_train.shape[0], 128) attack1 = FastGradientMethod(self.classifier_k) diff --git a/tests/defences/test_jpeg_compression.py b/tests/defences/test_jpeg_compression.py index 75752a1695..ad0f8badc6 100644 --- a/tests/defences/test_jpeg_compression.py +++ b/tests/defences/test_jpeg_compression.py @@ -63,6 +63,16 @@ def test_channel_index(self): self.assertTrue((compressed_x <= 1.0).all()) self.assertTrue((compressed_x >= 0.0).all()) + def test_failure_feature_vectors(self): + x = np.random.rand(10, 3) + preprocess = JpegCompression(channel_index=1, quality=80) + + # Assert that value error is raised for feature vectors + with self.assertRaises(ValueError) as context: + preprocess(x) + + self.assertTrue('Feature vectors detected.' in str(context.exception)) + if __name__ == '__main__': unittest.main() diff --git a/tests/defences/test_pixel_defend.py b/tests/defences/test_pixel_defend.py index 7e392c3e86..3f853d1313 100644 --- a/tests/defences/test_pixel_defend.py +++ b/tests/defences/test_pixel_defend.py @@ -20,6 +20,7 @@ import logging import unittest +import numpy as np import torch.nn as nn import torch.optim as optim @@ -30,9 +31,9 @@ logger = logging.getLogger('testLogger') -class Model(nn.Module): +class ModelImage(nn.Module): def __init__(self): - super(Model, self).__init__() + super(ModelImage, self).__init__() self.fc = nn.Linear(25, 6400) def forward(self, x): @@ -43,22 +44,34 @@ def forward(self, x): return logit_output +class Model(nn.Module): + def __init__(self): + super(Model, self).__init__() + self.fc = nn.Linear(4, 1024) + + def forward(self, x): + x = x.view(-1, 4) + logit_output = self.fc(x) + logit_output = logit_output.view(-1, 4, 256) + + return logit_output + + class TestPixelDefend(unittest.TestCase): def setUp(self): # Set master seed master_seed(1234) - # Define the network - model = Model() - loss_fn = nn.CrossEntropyLoss() - optimizer = optim.Adam(model.parameters(), lr=0.01) - self.pixelcnn = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10) - def test_one_channel(self): (x_train, _), (_, _), _, _ = load_mnist() - x_train = x_train[:2] + x_train = x_train[:2, 10:15, 15:20, :] - x_train = x_train[:, 10:15, 15:20, :] + # Define the network + model = ModelImage() + loss_fn = nn.CrossEntropyLoss() + optimizer = optim.Adam(model.parameters(), lr=0.01) + self.pixelcnn = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(1, 28, 28), + nb_classes=10, clip_values=(0, 1)) preprocess = PixelDefend(eps=5, pixel_cnn=self.pixelcnn) defended_x, _ = preprocess(x_train) @@ -66,6 +79,22 @@ def test_one_channel(self): self.assertTrue((defended_x <= 1.0).all()) self.assertTrue((defended_x >= 0.0).all()) + def test_feature_vectors(self): + # Define the network + model = Model() + loss_fn = nn.CrossEntropyLoss() + optimizer = 
optim.Adam(model.parameters(), lr=0.01) + pixel_cnn = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(4,), + nb_classes=2, clip_values=(0, 1)) + + x = np.random.rand(5, 4) + preprocess = PixelDefend(eps=5, pixel_cnn=pixel_cnn) + defended_x, _ = preprocess(x) + + self.assertTrue((defended_x.shape == x.shape)) + self.assertTrue((defended_x <= 1.0).all()) + self.assertTrue((defended_x >= 0.0).all()) + if __name__ == '__main__': unittest.main() diff --git a/tests/defences/test_spatial_smoothing.py b/tests/defences/test_spatial_smoothing.py index 75083b4928..5ee24482ff 100644 --- a/tests/defences/test_spatial_smoothing.py +++ b/tests/defences/test_spatial_smoothing.py @@ -72,6 +72,14 @@ def test_channels(self): self.assertTrue((smooth_x[0, 0] == new_smooth_x[0, :, :, 0]).all()) + def test_failure(self): + x = np.arange(10).reshape(5, 2) + preprocess = SpatialSmoothing(channel_index=1) + with self.assertRaises(ValueError) as context: + preprocess(x) + + self.assertTrue('Feature vectors detected.' in str(context.exception)) + if __name__ == '__main__': unittest.main() diff --git a/tests/defences/test_thermometer_encoding.py b/tests/defences/test_thermometer_encoding.py index c9774c1f24..4c3bdb2f87 100644 --- a/tests/defences/test_thermometer_encoding.py +++ b/tests/defences/test_thermometer_encoding.py @@ -33,7 +33,7 @@ def setUp(self): # Set master seed master_seed(1234) - def test_all(self): + def test_channel_last(self): # Test data x = np.array([[[[0.2, 0.6, 0.8], [0.9, 0.4, 0.3], [0.2, 0.8, 0.5]], [[0.2, 0.6, 0.8], [0.9, 0.4, 0.3], [0.2, 0.8, 0.5]]], @@ -57,13 +57,29 @@ def test_all(self): [0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], [1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1]]]]) self.assertTrue((x_preproc == true_value).all()) + def test_channel_first(self): + x = np.random.rand(5, 2, 28, 28) + x_copy = x.copy() + num_space = 5 + encoder = ThermometerEncoding(num_space=num_space, channel_index=1) + encoded_x, _ = encoder(x) + self.assertTrue((x == x_copy).all()) + self.assertTrue(encoded_x.shape == (5, 10, 28, 28)) + def test_estimate_gradient(self): num_space = 5 encoder = ThermometerEncoding(num_space=num_space) - x = np.random.rand(10, 28, 28, 1) - grad = np.ones((10, 28, 28, 1, num_space)) + x = np.random.rand(5, 28, 28, 1) + grad = np.ones((5, 28, 28, num_space)) estimated_grads = encoder.estimate_gradient(grad=grad, x=x) self.assertTrue(np.isin(estimated_grads, [0, 1]).all()) + def test_feature_vectors(self): + x = np.random.rand(10, 4) + num_space = 5 + encoder = ThermometerEncoding(num_space=num_space, channel_index=1) + encoded_x, _ = encoder(x) + self.assertTrue(encoded_x.shape == (10, 20)) + if __name__ == '__main__': unittest.main() diff --git a/tests/defences/test_variance_minimization.py b/tests/defences/test_variance_minimization.py index 3af5758cb4..f05800d187 100644 --- a/tests/defences/test_variance_minimization.py +++ b/tests/defences/test_variance_minimization.py @@ -51,6 +51,16 @@ def test_three_channels(self): self.assertTrue((preprocessed_x >= 0.0).all()) self.assertFalse((preprocessed_x == x).all()) + def test_failure_feature_vectors(self): + x = np.random.rand(10, 3) + preprocess = TotalVarMin() + + # Assert that value error is raised for feature vectors + with self.assertRaises(ValueError) as context: + preprocess(x) + + self.assertTrue('Feature vectors detected.' 
in str(context.exception)) + if __name__ == '__main__': unittest.main() diff --git a/tests/detection/test_detector.py b/tests/detection/test_detector.py index d00730a80c..e6fb4eb52a 100644 --- a/tests/detection/test_detector.py +++ b/tests/detection/test_detector.py @@ -81,7 +81,7 @@ def test_binary_input_detector(self): metrics=['accuracy']) # Create detector and train it: - detector = BinaryInputDetector(KerasClassifier((0, 1), model, use_logits=False)) + detector = BinaryInputDetector(KerasClassifier(model=model, clip_values=(0, 1), use_logits=False)) detector.fit(x_train_detector, y_train_detector, nb_epochs=2, batch_size=128) # Apply detector on clean and adversarial test data: @@ -143,7 +143,7 @@ def test_binary_activation_detector(self): # Create detector and train it. # Detector consider activations at layer=0: detector = BinaryActivationDetector(classifier=classifier, - detector=KerasClassifier((0, 1), model, use_logits=False), + detector=KerasClassifier(model=model, clip_values=(0, 1), use_logits=False), layer=0) detector.fit(x_train_detector, y_train_detector, nb_epochs=2, batch_size=128) diff --git a/tests/poison_detection/test_activation_defence.py b/tests/poison_detection/test_activation_defence.py index 676ef2436d..2df397d7de 100644 --- a/tests/poison_detection/test_activation_defence.py +++ b/tests/poison_detection/test_activation_defence.py @@ -56,7 +56,7 @@ def setUpClass(cls): model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) from art.classifiers import KerasClassifier - cls.classifier = KerasClassifier((min_, max_), model=model) + cls.classifier = KerasClassifier(model=model, clip_values=(min_, max_)) cls.classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128) diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 2d5c495742..082a3257a7 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -110,7 +110,7 @@ def _cnn_mnist_k(input_shape): model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01), metrics=['accuracy']) - classifier = KerasClassifier((0, 1), model, use_logits=False) + classifier = KerasClassifier(model=model, clip_values=(0, 1), use_logits=False) return classifier ######################################### @@ -169,7 +169,8 @@ def _create_tfclassifier(): sess.run(tf.global_variables_initializer()) # Create the classifier - tfc = TFClassifier((0, 1), input_ph, logits, output_ph, train, loss, None, sess) + tfc = TFClassifier(input_ph=input_ph, logits=logits, output_ph=output_ph, train=train, loss=loss, + learning=None, sess=sess, clip_values=(0, 1)) return tfc @@ -194,7 +195,7 @@ def _create_krclassifier(): metrics=['accuracy']) # Get the classifier - krc = KerasClassifier((0, 1), model, use_logits=False) + krc = KerasClassifier(model, clip_values=(0, 1), use_logits=False) return krc @@ -212,7 +213,8 @@ def _create_ptclassifier(): optimizer = optim.Adam(model.parameters(), lr=0.01) # Get classifier - ptc = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10) + ptc = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(1, 28, 28), nb_classes=10, + clip_values=(0, 1)) return ptc diff --git a/tests/test_utils.py b/tests/test_utils.py index 6c7e4fb218..149a1bf2c0 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -22,8 +22,8 @@ import numpy as np -from art.utils import load_mnist, projection, random_sphere, to_categorical, least_likely_class -from art.utils import master_seed +from art.utils import 
projection, random_sphere, to_categorical, least_likely_class +from art.utils import load_iris, load_mnist, master_seed from art.utils import second_most_likely_class, random_targets, get_label_conf, get_labels_np_array, preprocess logger = logging.getLogger('testLogger') @@ -230,7 +230,7 @@ def test_preprocess(self): x = (255 * x).astype('int')[:100] y = np.argmax(y, axis=1)[:100] - x_, y_ = preprocess(x, y) + x_, y_ = preprocess(x, y, clip_values=(0, 255)) self.assertEqual(x_.shape, x.shape) self.assertEqual(y_.shape, (y.shape[0], 10)) self.assertEqual(x_.max(), 1.0) @@ -240,12 +240,30 @@ def test_preprocess(self): x = (5 * x).astype('int')[:100] y = np.argmax(y, axis=1)[:100] - x_, y_ = preprocess(x, y, nb_classes=20, max_value=5) + x_, y_ = preprocess(x, y, nb_classes=20, clip_values=(0, 5)) self.assertEqual(x_.shape, x.shape) self.assertEqual(y_.shape, (y.shape[0], 20)) self.assertEqual(x_.max(), 1.0) self.assertEqual(x_.min(), 0) + def test_iris(self): + (x_train, y_train), (x_test, y_test), min_, max_ = load_iris() + + self.assertTrue((min_ == 0).all()) + self.assertTrue((max_ == 1).all()) + self.assertEqual(x_train.shape[0], y_train.shape[0]) + self.assertEqual(x_test.shape[0], y_test.shape[0]) + train_labels = np.argmax(y_train, axis=1) + self.assertTrue(np.setdiff1d(train_labels, np.array([0, 1, 2])).shape == (0,)) + test_labels = np.argmax(y_test, axis=1) + self.assertTrue(np.setdiff1d(test_labels, np.array([0, 1, 2])).shape == (0,)) + + (x_train, y_train), (x_test, y_test), min_, max_ = load_iris(test_set=0) + self.assertTrue(x_train.shape[0] == 150) + self.assertTrue(y_train.shape[0] == 150) + self.assertTrue(x_test is None) + self.assertTrue(y_test is None) + if __name__ == '__main__': unittest.main() diff --git a/tests/wrappers/test_expectation.py b/tests/wrappers/test_expectation.py index decc9f9354..3508630935 100644 --- a/tests/wrappers/test_expectation.py +++ b/tests/wrappers/test_expectation.py @@ -23,7 +23,8 @@ import numpy as np from art.attacks import FastGradientMethod -from art.utils import load_mnist, random_targets, master_seed, get_classifier_kr +from art.classifiers import KerasClassifier +from art.utils import load_dataset, random_targets, master_seed, get_classifier_kr, get_iris_classifier_kr from art.wrappers.expectation import ExpectationOverTransformations logger = logging.getLogger('testLogger') @@ -41,7 +42,7 @@ class TestExpectationOverTransformations(unittest.TestCase): @classmethod def setUpClass(cls): # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -81,5 +82,67 @@ def transformation(): self.assertTrue((np.abs(x_test_adv - x_test_adv_with_eot) < 0.001).all()) +class TestExpectationVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + # Set master seed + master_seed(1234) + + def test_iris_clipped(self): + (_, _), (x_test, y_test) = self.iris + + def t(x): + return x + + def transformation(): + while True: + yield t + + classifier, _ = get_iris_classifier_kr() + classifier = ExpectationOverTransformations(classifier, sample_size=1, transformation=transformation) + + # Test untargeted attack + attack = FastGradientMethod(classifier, eps=.1) 
+ x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with limited query info: %.2f%%', (acc * 100)) + + def test_iris_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + def t(x): + return x + + def transformation(): + while True: + yield t + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + classifier = ExpectationOverTransformations(classifier, sample_size=1, transformation=transformation) + attack = FastGradientMethod(classifier, eps=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv > 1).any()) + self.assertTrue((x_test_adv < 0).any()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with limited query info: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main() diff --git a/tests/wrappers/test_query_efficient_bb.py b/tests/wrappers/test_query_efficient_bb.py index 9d74c059aa..81535d957e 100644 --- a/tests/wrappers/test_query_efficient_bb.py +++ b/tests/wrappers/test_query_efficient_bb.py @@ -27,7 +27,8 @@ from art.wrappers.query_efficient_bb import QueryEfficientBBGradientEstimation from art.classifiers import KerasClassifier from art.defences import FeatureSqueezing -from art.utils import load_mnist, get_classifier_kr, get_labels_np_array, master_seed +from art.utils import load_dataset, get_classifier_kr, get_iris_classifier_kr, get_labels_np_array, master_seed +from art.utils import random_targets logger = logging.getLogger('testLogger') @@ -42,7 +43,7 @@ def setUpClass(cls): k.set_learning_phase(1) # Get MNIST - (x_train, y_train), (x_test, y_test), _, _ = load_mnist() + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) @@ -93,7 +94,7 @@ def test_with_defences(self): # Get the ready-trained Keras model model = self.classifier_k._model fs = FeatureSqueezing(bit_depth=1, clip_values=(0, 1)) - classifier = KerasClassifier((0, 1), model, defences=fs) + classifier = KerasClassifier(model=model, clip_values=(0, 1), defences=fs) # Wrap the classifier classifier = QueryEfficientBBGradientEstimation(classifier, 20, 1/64., round_samples=1/255.) @@ -121,5 +122,53 @@ def test_with_defences(self): (acc * 100)) +class TestQueryEfficientVectors(unittest.TestCase): + @classmethod + def setUpClass(cls): + # Get Iris + (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') + cls.iris = (x_train, y_train), (x_test, y_test) + + def setUp(self): + # Set master seed + master_seed(1234) + + def test_iris_clipped(self): + (_, _), (x_test, y_test) = self.iris + + classifier, _ = get_iris_classifier_kr() + classifier = QueryEfficientBBGradientEstimation(classifier, 20, 1/64., round_samples=1 / 255.) 
+ + # Test untargeted attack + attack = FastGradientMethod(classifier, eps=.1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv <= 1).all()) + self.assertTrue((x_test_adv >= 0).all()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with limited query info: %.2f%%', (acc * 100)) + + def test_iris_unbounded(self): + (_, _), (x_test, y_test) = self.iris + classifier, _ = get_iris_classifier_kr() + + # Recreate a classifier without clip values + classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) + classifier = QueryEfficientBBGradientEstimation(classifier, 20, 1/64., round_samples=1 / 255.) + attack = FastGradientMethod(classifier, eps=1) + x_test_adv = attack.generate(x_test) + self.assertFalse((x_test == x_test_adv).all()) + self.assertTrue((x_test_adv > 1).any()) + self.assertTrue((x_test_adv < 0).any()) + + preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) + self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) + acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] + logger.info('Accuracy on Iris with limited query info: %.2f%%', (acc * 100)) + + if __name__ == '__main__': unittest.main()
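
A large share of the hunks in this patch migrate classifier construction from a positional clip-value tuple to explicit keyword arguments, with clip_values now passed after the model. The short sketch below illustrates the updated call pattern; the small Keras model is a placeholder for illustration only and is not taken from this patch.

import keras
from keras.layers import Dense, Flatten
from art.classifiers import KerasClassifier

# Placeholder MNIST-sized model; any compiled Keras model can be wrapped the same way.
model = keras.models.Sequential([
    Flatten(input_shape=(28, 28, 1)),
    Dense(10, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Previously the clip values were the first positional argument:
#     KerasClassifier((0, 1), model, use_logits=False)
# After this patch the model comes first and clip_values is an explicit keyword argument:
classifier = KerasClassifier(model=model, clip_values=(0, 1), use_logits=False)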
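Several of the new failure tests (JpegCompression, SpatialSmoothing, TotalVarMin, ZooAttack) only assert that a ValueError containing 'Feature vectors detected.' is raised when a 2-D array is passed in. The following is a minimal sketch of the kind of input guard those assertions imply; the exact placement and wording of the check inside each defence or attack are assumptions here, not taken from this patch.

import numpy as np


def _check_spatial_input(x):
    """Sketch of the guard the new failure tests exercise: reject 2-D feature
    vectors for methods that require data with spatial dimensions (e.g. NHWC)."""
    if x.ndim == 2:
        raise ValueError('Feature vectors detected. This method can only be applied to data with spatial '
                         'dimensions.')


# A (10, 28, 28, 1) image batch passes, while a (10, 3) feature-vector batch is
# rejected, matching the assertions in the tests above.
_check_spatial_input(np.random.rand(10, 28, 28, 1))
try:
    _check_spatial_input(np.random.rand(10, 3))
except ValueError as error:
    assert 'Feature vectors detected.' in str(error)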