From f6a402cd3ec94038724971998f31dc833376b957 Mon Sep 17 00:00:00 2001
From: Kevin Eykholt
Date: Wed, 1 Dec 2021 15:14:27 +0000
Subject: [PATCH] line length

Signed-off-by: Kevin Eykholt
---
 art/__init__.py | 20 +++-
 art/attacks/attack.py | 4 +-
 .../adversarial_patch_numpy.py | 35 ++++--
 .../adversarial_patch_pytorch.py | 30 +++--
 .../adversarial_patch_tensorflow.py | 41 +++++--
 art/attacks/evasion/auto_attack.py | 19 ++-
 art/attacks/evasion/boundary.py | 43 +++++--
 art/attacks/evasion/brendel_bethge.py | 113 +++++++++++++++---
 art/attacks/evasion/carlini.py | 92 +++++++++++---
 art/attacks/evasion/decision_tree_attack.py | 21 +++-
 art/attacks/evasion/deepfool.py | 8 +-
 art/attacks/evasion/dpatch.py | 16 ++-
 art/attacks/evasion/dpatch_robust.py | 15 ++-
 art/attacks/evasion/elastic_net.py | 17 ++-
 art/attacks/evasion/fast_gradient.py | 22 +++-
 .../feature_adversaries_numpy.py | 12 +-
 .../feature_adversaries_tensorflow.py | 4 +-
 art/attacks/evasion/hclu.py | 11 +-
 art/attacks/evasion/hop_skip_jump.py | 43 +++++--
 .../imperceptible_asr_pytorch.py | 4 +-
 .../over_the_air_flickering_pytorch.py | 16 ++-
 art/attacks/evasion/pe_malware_attack.py | 10 +-
 art/attacks/evasion/pixel_threshold.py | 19 ++-
 .../projected_gradient_descent_pytorch.py | 33 +++--
 ...rojected_gradient_descent_tensorflow_v2.py | 23 +++-
 art/attacks/evasion/saliency_map.py | 11 +-
 art/attacks/evasion/shapeshifter.py | 12 +-
 art/attacks/evasion/spatial_transformation.py | 24 +++-
 art/attacks/evasion/virtual_adversarial.py | 6 +-
 art/attacks/evasion/wasserstein.py | 10 +-
 art/attacks/evasion/zoo.py | 43 +++++--
 art/attacks/extraction/copycat_cnn.py | 5 +-
 .../functionally_equivalent_extraction.py | 29 ++++-
 art/attacks/extraction/knockoff_nets.py | 12 +-
 .../inference/attribute_inference/baseline.py | 4 +-
 .../membership_inference/black_box.py | 9 +-
 .../membership_inference/shadow_models.py | 5 +-
 art/attacks/poisoning/poisoning_attack_svm.py | 7 +-
 art/defences/detector/evasion/detector.py | 5 +-
 .../evasion/subsetscanning/scanner.py | 16 ++-
 .../evasion/subsetscanning/scanningops.py | 8 +-
 .../subsetscanning/scoring_functions.py | 5 +-
 .../detector/poison/activation_defence.py | 32 +++--
 .../detector/poison/clustering_analyzer.py | 13 +-
 .../detector/poison/ground_truth_evaluator.py | 54 +++++++--
 .../detector/poison/provenance_defense.py | 17 ++-
 art/defences/detector/poison/roni.py | 12 +-
 .../poison/spectral_signature_defense.py | 4 +-
 art/defences/postprocessor/reverse_sigmoid.py | 6 +-
 .../preprocessor/feature_squeezing.py | 6 +-
 art/defences/preprocessor/label_smoothing.py | 7 +-
 art/defences/preprocessor/pixel_defend.py | 5 +-
 .../trainer/adversarial_trainer_fbf.py | 4 +-
 .../trainer/adversarial_trainer_madry_pgd.py | 6 +-
 art/defences/transformer/poisoning/strip.py | 4 +-
 .../randomized_smoothing.py | 9 +-
 art/estimators/classification/keras.py | 29 ++++-
 art/estimators/classification/mxnet.py | 9 +-
 art/estimators/classification/pytorch.py | 45 +++++--
 .../classification/query_efficient_bb.py | 10 +-
 art/estimators/classification/scikitlearn.py | 75 +++++++++--
 art/estimators/classification/tensorflow.py | 41 +++++--
 art/estimators/classification/xgboost.py | 13 +-
 .../python_object_detector.py | 7 +-
 .../object_detection/pytorch_faster_rcnn.py | 7 +-
 art/estimators/pytorch.py | 6 +-
 .../speech_recognition/pytorch_deep_speech.py | 21 +++-
 .../speech_recognition/pytorch_espresso.py | 36 +++++-
 .../security_curve/security_curve.py | 5 +-
 art/metrics/gradient_check.py | 6 +-
 art/metrics/metrics.py | 18 ++-
 art/metrics/verification_decisions_trees.py | 25 +++-
 .../pytorch.py | 6 +-
 .../tensorflow.py | 6 +-
 .../standardisation_mean_std/numpy.py | 6 +-
 art/utils.py | 85 ++++++++++---
 art/visualization.py | 6 +-
 tests/attacks/evasion/test_auto_attack.py | 8 +-
 tests/attacks/evasion/test_dpatch.py | 9 +-
 .../evasion/test_imperceptible_asr_pytorch.py | 5 +-
 .../attacks/evasion/test_pe_malware_attack.py | 7 +-
 tests/classifiersFrameworks/test_pytorch.py | 6 +-
 .../poison/test_provenance_defence.py | 25 +++-
 tests/defences/detector/poison/test_roni.py | 21 +++-
 .../defences/preprocessor/test_inverse_gan.py | 4 +-
 .../trainer/test_adversarial_trainer_FBF.py | 8 +-
 .../test_randomized_smoothing.py | 7 +-
 .../test_deeplearning_common.py | 50 ++++++--
 .../test_deeplearning_specific.py | 26 +++-
 .../classification/test_scikitlearn.py | 8 +-
 .../test_pytorch_faster_rcnn.py | 43 +++++--
 .../test_pytorch_object_detector.py | 27 ++++-
 .../test_tensorflow_faster_rcnn.py | 24 +++-
 .../audio/test_l_filter_pytorch.py | 3 +-
 tests/utils.py | 19 ++-
 95 files changed, 1474 insertions(+), 349 deletions(-)

diff --git a/art/__init__.py b/art/__init__.py
index 28a537c7ce..c5db3203b1 100644
--- a/art/__init__.py
+++ b/art/__init__.py
@@ -20,13 +20,25 @@
     "version": 1,
     "disable_existing_loggers": False,
     "formatters": {
-        "std": {"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s", "datefmt": "%Y-%m-%d %H:%M",}
+        "std": {
+            "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s",
+            "datefmt": "%Y-%m-%d %H:%M",
+        }
     },
     "handlers": {
-        "default": {"class": "logging.NullHandler",},
-        "test": {"class": "logging.StreamHandler", "formatter": "std", "level": logging.INFO,},
+        "default": {
+            "class": "logging.NullHandler",
+        },
+        "test": {
+            "class": "logging.StreamHandler",
+            "formatter": "std",
+            "level": logging.INFO,
+        },
+    },
+    "loggers": {
+        "art": {"handlers": ["default"]},
+        "tests": {"handlers": ["test"], "level": "INFO", "propagate": True},
     },
-    "loggers": {"art": {"handlers": ["default"]}, "tests": {"handlers": ["test"], "level": "INFO", "propagate": True},}
 }
 logging.config.dictConfig(LOGGING)
 logger = logging.getLogger(__name__)
diff --git a/art/attacks/attack.py b/art/attacks/attack.py
index a0ecf2e20c..37128445b9 100644
--- a/art/attacks/attack.py
+++ b/art/attacks/attack.py
@@ -98,7 +98,9 @@ class Attack(abc.ABC):
     _estimator_requirements: Optional[Union[Tuple[Any, ...], Tuple[()]]] = None

     def __init__(
-        self, estimator, tensor_board: Union[str, bool] = False,
+        self,
+        estimator,
+        tensor_board: Union[str, bool] = False,
     ):
         """
         :param estimator: An estimator.
diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py
index 5a11996f41..9f9b57b39a 100644
--- a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py
+++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py
@@ -214,12 +214,15 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> T
                 i_batch_end = (i_batch + 1) * self.batch_size

                 gradients = self.estimator.loss_gradient(
-                    patched_images[i_batch_start:i_batch_end], y_target[i_batch_start:i_batch_end],
+                    patched_images[i_batch_start:i_batch_end],
+                    y_target[i_batch_start:i_batch_end],
                 )

                 for i_image in range(gradients.shape[0]):
                     patch_gradients_i = self._reverse_transformation(
-                        gradients[i_image, :, :, :], patch_mask_transformed[i_image, :, :, :], transforms[i_image],
+                        gradients[i_image, :, :, :],
+                        patch_mask_transformed[i_image, :, :, :],
+                        transforms[i_image],
                     )
                     if self.nb_dims == 4:
                         patch_gradients_i = np.mean(patch_gradients_i, axis=0)
@@ -227,7 +230,11 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> T
             # patch_gradients = patch_gradients / (num_batches * self.batch_size)
             self.patch -= patch_gradients * self.learning_rate
-            self.patch = np.clip(self.patch, a_min=self.estimator.clip_values[0], a_max=self.estimator.clip_values[1],)
+            self.patch = np.clip(
+                self.patch,
+                a_min=self.estimator.clip_values[0],
+                a_max=self.estimator.clip_values[1],
+            )

         return self.patch, self._get_circular_patch_mask()
@@ -329,9 +336,11 @@ def _augment_images_with_random_patch(self, images, patch, mask=None, scale=None
         else:
             mask_2d = mask

-        (patch_transformed, patch_mask_transformed, transformation,) = self._random_transformation(
-            patch, scale, mask_2d
-        )
+        (
+            patch_transformed,
+            patch_mask_transformed,
+            transformation,
+        ) = self._random_transformation(patch, scale, mask_2d)

         inverted_patch_mask_transformed = 1 - patch_mask_transformed
@@ -486,8 +495,18 @@ def _random_transformation(self, patch, scale, mask_2d):
         transformation["pad_h_before"] = pad_h_before
         transformation["pad_w_before"] = pad_w_before

-        patch = np.pad(patch, pad_width=pad_width, mode="constant", constant_values=(0, 0),)
-        patch_mask = np.pad(patch_mask, pad_width=pad_width, mode="constant", constant_values=(0, 0),)
+        patch = np.pad(
+            patch,
+            pad_width=pad_width,
+            mode="constant",
+            constant_values=(0, 0),
+        )
+        patch_mask = np.pad(
+            patch_mask,
+            pad_width=pad_width,
+            mode="constant",
+            constant_values=(0, 0),
+        )

         # shift
         if mask_2d is None:
diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py
index 0ec2f9e561..22efd1b73a 100644
--- a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py
+++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py
@@ -190,7 +190,9 @@ def _predictions(self, images: "torch.Tensor", mask: Optional["torch.Tensor"]) -
         patched_input = self._random_overlay(images, self._patch, mask=mask)
         patched_input = torch.clamp(
-            patched_input, min=self.estimator.clip_values[0], max=self.estimator.clip_values[1],
+            patched_input,
+            min=self.estimator.clip_values[0],
+            max=self.estimator.clip_values[1],
         )

         predictions = self.estimator._predict_framework(patched_input)  # pylint: disable=W0212
@@ -255,7 +257,9 @@ def _random_overlay(
         smallest_image_edge = np.minimum(self.image_shape[self.i_h], self.image_shape[self.i_w])

         image_mask = torchvision.transforms.functional.resize(
-            img=image_mask, size=(smallest_image_edge, smallest_image_edge), interpolation=2,
+            img=image_mask,
+            size=(smallest_image_edge, smallest_image_edge),
+            interpolation=2,
         )

         pad_h_before = int((self.image_shape[self.i_h] - image_mask.shape[self.i_h_patch + 1]) / 2)
@@ -281,7 +285,9 @@ def _random_overlay(
         padded_patch = torch.stack([patch] * nb_samples)

         padded_patch = torchvision.transforms.functional.resize(
-            img=padded_patch, size=(smallest_image_edge, smallest_image_edge), interpolation=2,
+            img=padded_patch,
+            size=(smallest_image_edge, smallest_image_edge),
+            interpolation=2,
         )

         padded_patch = torchvision.transforms.functional.pad(
@@ -449,13 +455,19 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> T
         if mask is None:
             dataset = torch.utils.data.TensorDataset(x_tensor, y_tensor)
             data_loader = torch.utils.data.DataLoader(
-                dataset=dataset, batch_size=self.batch_size, shuffle=shuffle, drop_last=False,
+                dataset=dataset,
+                batch_size=self.batch_size,
+                shuffle=shuffle,
+                drop_last=False,
             )
         else:
             mask_tensor = torch.Tensor(mask)
             dataset = torch.utils.data.TensorDataset(x_tensor, y_tensor, mask_tensor)
             data_loader = torch.utils.data.DataLoader(
-                dataset=dataset, batch_size=self.batch_size, shuffle=shuffle, drop_last=False,
+                dataset=dataset,
+                batch_size=self.batch_size,
+                shuffle=shuffle,
+                drop_last=False,
             )

         for i_iter in trange(self.max_iter, desc="Adversarial Patch PyTorch", disable=not self.verbose):
@@ -474,7 +486,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> T
             if self.summary_writer is not None:  # pragma: no cover
                 self.summary_writer.add_image(
-                    "patch", self._patch, global_step=i_iter,
+                    "patch",
+                    self._patch,
+                    global_step=i_iter,
                 )

                 if hasattr(self.estimator, "compute_losses"):
@@ -485,7 +499,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> T
                     for key, value in losses.items():
                         self.summary_writer.add_scalar(
-                            "loss/{}".format(key), np.mean(value.detach().cpu().numpy()), global_step=i_iter,
+                            "loss/{}".format(key),
+                            np.mean(value.detach().cpu().numpy()),
+                            global_step=i_iter,
                         )

         return (
diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py
index b4696d7152..5919163d54 100644
--- a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py
+++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py
@@ -189,7 +189,9 @@ def _predictions(self, images: "tf.Tensor", mask: Optional["tf.Tensor"]) -> "tf.
         patched_input = self._random_overlay(images, self._patch, mask=mask)
         patched_input = tf.clip_by_value(
-            patched_input, clip_value_min=self.estimator.clip_values[0], clip_value_max=self.estimator.clip_values[1],
+            patched_input,
+            clip_value_min=self.estimator.clip_values[0],
+            clip_value_max=self.estimator.clip_values[1],
         )

         predictions = self.estimator._predict_framework(patched_input)  # pylint: disable=W0212
@@ -335,7 +337,10 @@ def _random_overlay(
         # Rotation
         rotation_matrix = np.array(
-            [[math.cos(-phi_rotate), -math.sin(-phi_rotate)], [math.sin(-phi_rotate), math.cos(-phi_rotate)],]
+            [
+                [math.cos(-phi_rotate), -math.sin(-phi_rotate)],
+                [math.sin(-phi_rotate), math.cos(-phi_rotate)],
+            ]
         )

         # Scale
@@ -355,11 +360,27 @@ def _random_overlay(
             transform_vectors.append([a_0, a_1, x_origin_delta, b_0, b_1, y_origin_delta, 0, 0])
             translation_vectors.append([1, 0, -x_shift, 0, 1, -y_shift, 0, 0])

-        image_mask = tfa.image.transform(image_mask, transform_vectors, "BILINEAR",)
-        padded_patch = tfa.image.transform(padded_patch, transform_vectors, "BILINEAR",)
+        image_mask = tfa.image.transform(
+            image_mask,
+            transform_vectors,
+            "BILINEAR",
+        )
+        padded_patch = tfa.image.transform(
+            padded_patch,
+            transform_vectors,
+            "BILINEAR",
+        )

-        image_mask = tfa.image.transform(image_mask, translation_vectors, "BILINEAR",)
-        padded_patch = tfa.image.transform(padded_patch, translation_vectors, "BILINEAR",)
+        image_mask = tfa.image.transform(
+            image_mask,
+            translation_vectors,
+            "BILINEAR",
+        )
+        padded_patch = tfa.image.transform(
+            padded_patch,
+            translation_vectors,
+            "BILINEAR",
+        )

         if self.nb_dims == 4:
             image_mask = tf.stack([image_mask] * images.shape[1], axis=1)
@@ -439,7 +460,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> T
             if self.summary_writer is not None:  # pragma: no cover
                 self.summary_writer.add_image(
-                    "patch", self._patch.numpy().transpose((2, 0, 1)), global_step=i_iter,
+                    "patch",
+                    self._patch.numpy().transpose((2, 0, 1)),
+                    global_step=i_iter,
                 )

                 if hasattr(self.estimator, "compute_losses"):
@@ -448,7 +471,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> T
                     for key, value in losses.items():
                         self.summary_writer.add_scalar(
-                            "loss/{}".format(key), np.mean(value), global_step=i_iter,
+                            "loss/{}".format(key),
+                            np.mean(value),
+                            global_step=i_iter,
                         )

         return (
diff --git a/art/attacks/evasion/auto_attack.py b/art/attacks/evasion/auto_attack.py
index 387aec1bcc..90ac5d954f 100644
--- a/art/attacks/evasion/auto_attack.py
+++ b/art/attacks/evasion/auto_attack.py
@@ -178,7 +178,11 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
             attack.set_params(targeted=False)

             x_adv, sample_is_robust = self._run_attack(
-                x=x_adv, y=y, sample_is_robust=sample_is_robust, attack=attack, **kwargs,
+                x=x_adv,
+                y=y,
+                sample_is_robust=sample_is_robust,
+                attack=attack,
+                **kwargs,
             )

         # Targeted attacks
@@ -205,13 +209,22 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                     target = check_and_transform_label_format(targeted_labels[:, i], self.estimator.nb_classes)

                     x_adv, sample_is_robust = self._run_attack(
-                        x=x_adv, y=target, sample_is_robust=sample_is_robust, attack=attack, **kwargs,
+                        x=x_adv,
+                        y=target,
+                        sample_is_robust=sample_is_robust,
+                        attack=attack,
+                        **kwargs,
                     )

         return x_adv

     def _run_attack(
-        self, x: np.ndarray, y: np.ndarray, sample_is_robust: np.ndarray, attack: EvasionAttack, **kwargs,
+        self,
+        x: np.ndarray,
+        y: np.ndarray,
+        sample_is_robust: np.ndarray,
+        attack: EvasionAttack,
+        **kwargs,
     ) -> Tuple[np.ndarray, np.ndarray]:
         """
         Run attack.
diff --git a/art/attacks/evasion/boundary.py b/art/attacks/evasion/boundary.py
index 99580088f9..94b727d74d 100644
--- a/art/attacks/evasion/boundary.py
+++ b/art/attacks/evasion/boundary.py
@@ -189,7 +189,14 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
         return x_adv

     def _perturb(
-        self, x: np.ndarray, y: int, y_p: int, init_pred: int, adv_init: np.ndarray, clip_min: float, clip_max: float,
+        self,
+        x: np.ndarray,
+        y: int,
+        y_p: int,
+        init_pred: int,
+        adv_init: np.ndarray,
+        clip_min: float,
+        clip_max: float,
     ) -> np.ndarray:
         """
         Internal attack function for one example.
@@ -212,7 +219,14 @@ def _perturb(
         # If an initial adversarial example found, then go with boundary attack
         x_adv = self._attack(
-            initial_sample[0], x, y_p, initial_sample[1], self.delta, self.epsilon, clip_min, clip_max,
+            initial_sample[0],
+            x,
+            y_p,
+            initial_sample[1],
+            self.delta,
+            self.epsilon,
+            clip_min,
+            clip_max,
         )

         return x_adv
@@ -258,7 +272,10 @@ def _attack(
                 potential_adv = np.clip(potential_adv, clip_min, clip_max)
                 potential_advs.append(potential_adv)

-            preds = np.argmax(self.estimator.predict(np.array(potential_advs), batch_size=self.batch_size), axis=1,)
+            preds = np.argmax(
+                self.estimator.predict(np.array(potential_advs), batch_size=self.batch_size),
+                axis=1,
+            )

             if self.targeted:
                 satisfied = preds == target
@@ -285,7 +302,10 @@ def _attack(
                 perturb *= self.curr_epsilon
                 potential_advs = x_advs + perturb
                 potential_advs = np.clip(potential_advs, clip_min, clip_max)
-                preds = np.argmax(self.estimator.predict(potential_advs, batch_size=self.batch_size), axis=1,)
+                preds = np.argmax(
+                    self.estimator.predict(potential_advs, batch_size=self.batch_size),
+                    axis=1,
+                )

                 if self.targeted:
                     satisfied = preds == target
@@ -343,7 +363,14 @@ def _orthogonal_perturb(self, delta: float, current_sample: np.ndarray, original
         return perturb

     def _init_sample(
-        self, x: np.ndarray, y: int, y_p: int, init_pred: int, adv_init: np.ndarray, clip_min: float, clip_max: float,
+        self,
+        x: np.ndarray,
+        y: int,
+        y_p: int,
+        init_pred: int,
+        adv_init: np.ndarray,
+        clip_min: float,
+        clip_max: float,
     ) -> Optional[Tuple[np.ndarray, int]]:
         """
         Find initial adversarial example for the attack.
@@ -373,7 +400,8 @@ def _init_sample(
             for _ in range(self.init_size):
                 random_img = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype)
                 random_class = np.argmax(
-                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size), axis=1,
+                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size),
+                    axis=1,
                 )[0]

                 if random_class == y:
@@ -393,7 +421,8 @@ def _init_sample(
             for _ in range(self.init_size):
                 random_img = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype)
                 random_class = np.argmax(
-                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size), axis=1,
+                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size),
+                    axis=1,
                 )[0]

                 if random_class != y_p:
diff --git a/art/attacks/evasion/brendel_bethge.py b/art/attacks/evasion/brendel_bethge.py
index bfd0d9ed81..22a3608be2 100644
--- a/art/attacks/evasion/brendel_bethge.py
+++ b/art/attacks/evasion/brendel_bethge.py
@@ -115,9 +115,15 @@ def solve(self, fun_and_jac, q0, bounds, args, ftol=1e-10, pgtol=-1e-5, maxiter=
                 qk1 = self._subspace_min(qk, var_l, u, x_cp, _gfk.copy(), Hk)
                 pk = qk1 - qk

-            (alpha_k, fc, gc, old_fval, old_old_fval, gfkp1, fnev,) = self._line_search_wolfe(
-                fun_and_jac, qk, pk, _gfk, old_fval, old_old_fval, var_l, u, args
-            )
+            (
+                alpha_k,
+                fc,
+                gc,
+                old_fval,
+                old_old_fval,
+                gfkp1,
+                fnev,
+            ) = self._line_search_wolfe(fun_and_jac, qk, pk, _gfk, old_fval, old_old_fval, var_l, u, args)
             func_calls += fnev

             if alpha_k is None:
@@ -276,7 +282,22 @@ def _project(self, q, var_l, u):
         return q

     def _line_search_armijo(
-        self, fun_and_jac, pt, dpt, func_calls, m, gk, var_l, u, x0, x, b, min_, max_, c, r,
+        self,
+        fun_and_jac,
+        pt,
+        dpt,
+        func_calls,
+        m,
+        gk,
+        var_l,
+        u,
+        x0,
+        x,
+        b,
+        min_,
+        max_,
+        c,
+        r,
     ):
         ls_rho = 0.6
         ls_c = 1e-4
@@ -298,7 +319,16 @@ def _line_search_armijo(
         return ls_alpha, ls_pt, gkp1, dgkp1, func_calls

     def _line_search_wolfe(
-        self, fun_and_jac, xk, pk, gfk, old_fval, old_old_fval, var_l, u, args,
+        self,
+        fun_and_jac,
+        xk,
+        pk,
+        gfk,
+        old_fval,
+        old_old_fval,
+        var_l,
+        u,
+        args,
     ):
         """Find alpha that satisfies strong Wolfe conditions.
         Uses the line search algorithm to enforce strong Wolfe conditions
@@ -1382,7 +1412,19 @@ def optimize_distance_s_t_boundary_and_trustregion(self, x0, x, b, min_, max_, c
         return self.minimize(params0, bounds, x0, x, b, min_, max_, c, r)

     def minimize(
-        self, q0, bounds, x0, x, b, min_, max_, c, r, ftol=1e-9, xtol=-1e-5, maxiter=1000,
+        self,
+        q0,
+        bounds,
+        x0,
+        x,
+        b,
+        min_,
+        max_,
+        c,
+        r,
+        ftol=1e-9,
+        xtol=-1e-5,
+        maxiter=1000,
     ):
         # First check whether solution can be computed without trust region
         delta, delta_norm = self.minimize_without_trustregion(x0, x, b, c, r, min_, max_)
@@ -1436,7 +1478,17 @@ def minimize_without_trustregion(self, x0, x, b, c, r, ell, u):
         return delta, delta_norm

     def _nelder_mead_algorithm(
-        self, q0, bounds, args=(), ρ=1.0, χ=2.0, γ=0.5, σ=0.5, tol_f=1e-8, tol_x=1e-8, max_iter=1000,
+        self,
+        q0,
+        bounds,
+        args=(),
+        ρ=1.0,
+        χ=2.0,
+        γ=0.5,
+        σ=0.5,
+        tol_f=1e-8,
+        tol_x=1e-8,
+        max_iter=1000,
     ):
         """
         Implements the Nelder-Mead algorithm described in Lagarias et al. (1998)
@@ -2112,7 +2164,12 @@ def logits_difference(y_pred, y_true):  # type: ignore
         else:
             self.theta = 0.01 / np.prod(self.estimator.input_shape)

-    def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs,) -> np.ndarray:
+    def generate(
+        self,
+        x: np.ndarray,
+        y: Optional[np.ndarray] = None,
+        **kwargs,
+    ) -> np.ndarray:
         """
         Applies the Brendel & Bethge attack.
@@ -2289,7 +2346,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs,) ->
                 # we aim to slight overshoot over the boundary to stay within the adversarial region
                 corr_logits_diffs = np.where(
-                    -logits_diffs < 0, -self.overshoot * logits_diffs, -(2 - self.overshoot) * logits_diffs,
+                    -logits_diffs < 0,
+                    -self.overshoot * logits_diffs,
+                    -(2 - self.overshoot) * logits_diffs,
                 )

                 # employ solver to find optimal step within trust region for each sample
@@ -2324,7 +2383,11 @@ def norms(self, x: np.ndarray) -> np.ndarray:
         return norm

     def mid_points(
-        self, x0: np.ndarray, x1: np.ndarray, epsilons: np.ndarray, bounds: Tuple[float, float],
+        self,
+        x0: np.ndarray,
+        x1: np.ndarray,
+        epsilons: np.ndarray,
+        bounds: Tuple[float, float],
     ) -> np.ndarray:
         """
         returns a point between x0 and x1 where epsilon = 0 returns x0 and epsilon = 1 returns x1
@@ -2361,7 +2424,14 @@ def mid_points(
         return new_x.astype(config.ART_NUMPY_DTYPE)

     def _init_sample(
-        self, x: np.ndarray, y: int, y_p: int, init_pred: int, adv_init: np.ndarray, clip_min: float, clip_max: float,
+        self,
+        x: np.ndarray,
+        y: int,
+        y_p: int,
+        init_pred: int,
+        adv_init: np.ndarray,
+        clip_min: float,
+        clip_max: float,
     ) -> Optional[Union[np.ndarray, Tuple[np.ndarray, int]]]:
         """
         Find initial adversarial example for the attack.
@@ -2391,7 +2461,8 @@ def _init_sample(
             for _ in range(self.init_size):
                 random_img = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype)
                 random_class = np.argmax(
-                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size), axis=1,
+                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size),
+                    axis=1,
                 )[0]

                 if random_class == y:
@@ -2421,7 +2492,8 @@ def _init_sample(
             for _ in range(self.init_size):
                 random_img = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype)
                 random_class = np.argmax(
-                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size), axis=1,
+                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size),
+                    axis=1,
                 )[0]

                 if random_class != y_p:
@@ -2487,18 +2559,27 @@ def _binary_search(
             # Interpolation point
             alpha = (upper_bound + lower_bound) / 2.0
             interpolated_sample = self._interpolate(
-                current_sample=current_sample, original_sample=original_sample, alpha=alpha, norm=norm,
+                current_sample=current_sample,
+                original_sample=original_sample,
+                alpha=alpha,
+                norm=norm,
             )

             # Update upper_bound and lower_bound
             satisfied = self._adversarial_satisfactory(
-                samples=interpolated_sample[None], target=target, clip_min=clip_min, clip_max=clip_max,
+                samples=interpolated_sample[None],
+                target=target,
+                clip_min=clip_min,
+                clip_max=clip_max,
             )[0]
             lower_bound = np.where(satisfied == 0, alpha, lower_bound)
             upper_bound = np.where(satisfied == 1, alpha, upper_bound)

         result = self._interpolate(
-            current_sample=current_sample, original_sample=original_sample, alpha=upper_bound, norm=norm,
+            current_sample=current_sample,
+            original_sample=original_sample,
+            alpha=upper_bound,
+            norm=norm,
         )

         return result
diff --git a/art/attacks/evasion/carlini.py b/art/attacks/evasion/carlini.py
index 9afb4b2e8a..41f523751d 100644
--- a/art/attacks/evasion/carlini.py
+++ b/art/attacks/evasion/carlini.py
@@ -149,11 +149,14 @@ def _loss(
         """
         l2dist = np.sum(np.square(x - x_adv).reshape(x.shape[0], -1), axis=1)
         z_predicted = self.estimator.predict(
-            np.array(x_adv, dtype=ART_NUMPY_DTYPE), logits=True, batch_size=self.batch_size,
+            np.array(x_adv, dtype=ART_NUMPY_DTYPE),
+            logits=True,
+            batch_size=self.batch_size,
         )
         z_target = np.sum(z_predicted * target, axis=1)
         z_other = np.max(
-            z_predicted * (1 - target) + (np.min(z_predicted, axis=1) - 1)[:, np.newaxis] * target, axis=1,
+            z_predicted * (1 - target) + (np.min(z_predicted, axis=1) - 1)[:, np.newaxis] * target,
+            axis=1,
         )

         # The following differs from the exact definition given in Carlini and Wagner (2016). There (page 9, left
@@ -197,10 +200,16 @@ def _loss_gradient(
         """
         if self.targeted:
             i_sub = np.argmax(target, axis=1)
-            i_add = np.argmax(z_logits * (1 - target) + (np.min(z_logits, axis=1) - 1)[:, np.newaxis] * target, axis=1,)
+            i_add = np.argmax(
+                z_logits * (1 - target) + (np.min(z_logits, axis=1) - 1)[:, np.newaxis] * target,
+                axis=1,
+            )
         else:
             i_add = np.argmax(target, axis=1)
-            i_sub = np.argmax(z_logits * (1 - target) + (np.min(z_logits, axis=1) - 1)[:, np.newaxis] * target, axis=1,)
+            i_sub = np.argmax(
+                z_logits * (1 - target) + (np.min(z_logits, axis=1) - 1)[:, np.newaxis] * target,
+                axis=1,
+            )

         loss_gradient = self.estimator.class_gradient(x_adv, label=i_add)
         loss_gradient -= self.estimator.class_gradient(x_adv, label=i_sub)
@@ -270,11 +279,16 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
             for bss in range(self.binary_search_steps):
                 logger.debug(
-                    "Binary search step %i out of %i (c_mean==%f)", bss, self.binary_search_steps, np.mean(c_current),
+                    "Binary search step %i out of %i (c_mean==%f)",
+                    bss,
+                    self.binary_search_steps,
+                    np.mean(c_current),
                 )
                 nb_active = int(np.sum(c_current < self._c_upper_bound))
                 logger.debug(
-                    "Number of samples with c_current < _c_upper_bound: %i out of %i", nb_active, x_batch.shape[0],
+                    "Number of samples with c_current < _c_upper_bound: %i out of %i",
+                    nb_active,
+                    x_batch.shape[0],
                 )
                 if nb_active == 0:  # pragma: no cover
                     break
@@ -337,11 +351,14 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                     for i_halve in range(self.max_halving):
                         logger.debug(
-                            "Perform halving iteration %i out of %i", i_halve, self.max_halving,
+                            "Perform halving iteration %i out of %i",
+                            i_halve,
+                            self.max_halving,
                         )
                         do_halving = loss[active] >= prev_loss[active]
                         logger.debug(
-                            "Halving to be performed on %i samples", int(np.sum(do_halving)),
+                            "Halving to be performed on %i samples",
+                            int(np.sum(do_halving)),
                         )
                         if np.sum(do_halving) == 0:
                             break
@@ -376,11 +393,14 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                     # decreases the loss:
                     for i_double in range(self.max_doubling):
                         logger.debug(
-                            "Perform doubling iteration %i out of %i", i_double, self.max_doubling,
+                            "Perform doubling iteration %i out of %i",
+                            i_double,
+                            self.max_doubling,
                         )
                         do_doubling = (halving[active] == 1) & (loss[active] <= best_loss[active])
                         logger.debug(
-                            "Doubling to be performed on %i samples", int(np.sum(do_doubling)),
+                            "Doubling to be performed on %i samples",
+                            int(np.sum(do_doubling)),
                         )
                         if np.sum(do_doubling) == 0:
                             break
@@ -411,7 +431,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                     update_adv = best_lr[active] > 0
                     logger.debug(
-                        "Number of adversarial samples to be finally updated: %i", int(np.sum(update_adv)),
+ "Number of adversarial samples to be finally updated: %i", + int(np.sum(update_adv)), ) if np.sum(update_adv) > 0: @@ -566,7 +587,8 @@ def _loss( z_predicted = self.estimator.predict(np.array(x_adv, dtype=ART_NUMPY_DTYPE), batch_size=1) z_target = np.sum(z_predicted * target, axis=1) z_other = np.max( - z_predicted * (1 - target) + (np.min(z_predicted, axis=1) - 1)[:, np.newaxis] * target, axis=1, + z_predicted * (1 - target) + (np.min(z_predicted, axis=1) - 1)[:, np.newaxis] * target, + axis=1, ) if self.targeted: @@ -608,10 +630,16 @@ def _loss_gradient( """ if self.targeted: i_sub = np.argmax(target, axis=1) - i_add = np.argmax(z_logits * (1 - target) + (np.min(z_logits, axis=1) - 1)[:, np.newaxis] * target, axis=1,) + i_add = np.argmax( + z_logits * (1 - target) + (np.min(z_logits, axis=1) - 1)[:, np.newaxis] * target, + axis=1, + ) else: i_add = np.argmax(target, axis=1) - i_sub = np.argmax(z_logits * (1 - target) + (np.min(z_logits, axis=1) - 1)[:, np.newaxis] * target, axis=1,) + i_sub = np.argmax( + z_logits * (1 - target) + (np.min(z_logits, axis=1) - 1)[:, np.newaxis] * target, + axis=1, + ) loss_gradient = self.estimator.class_gradient(x_adv, label=i_add) loss_gradient -= self.estimator.class_gradient(x_adv, label=i_sub) @@ -646,7 +674,11 @@ def _generate_single(self, x_batch, y_batch, clip_min, clip_max, const, tau): def func(x_i): x_adv_batch_tanh = x_i - x_adv_batch = tanh_to_original(x_adv_batch_tanh, clip_min, clip_max,) + x_adv_batch = tanh_to_original( + x_adv_batch_tanh, + clip_min, + clip_max, + ) _, loss, _, _ = self._loss(x_adv_batch, y_batch, x_batch, const, tau) @@ -655,12 +687,23 @@ def func(x_i): def func_der(x_i): x_adv_batch_tanh = x_i - x_adv_batch = tanh_to_original(x_adv_batch_tanh, clip_min, clip_max,) + x_adv_batch = tanh_to_original( + x_adv_batch_tanh, + clip_min, + clip_max, + ) z_logits, _, _, _ = self._loss(x_adv_batch, y_batch, x_batch, const, tau) perturbation_tanh = self._loss_gradient( - z_logits, y_batch, x_adv_batch, x_adv_batch_tanh, clip_min, clip_max, x_batch, tau, + z_logits, + y_batch, + x_adv_batch, + x_adv_batch_tanh, + clip_min, + clip_max, + x_batch, + tau, ) return perturbation_tanh @@ -670,7 +713,11 @@ def func_der(x_i): adam = Adam(alpha=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8) x_adv_batch_tanh = adam.optimize(func=func, jac=func_der, x_0=x_0, max_iter=self.max_iter, loss_converged=0.001) - x_adv_batch = tanh_to_original(x_adv_batch_tanh, clip_min, clip_max,) + x_adv_batch = tanh_to_original( + x_adv_batch_tanh, + clip_min, + clip_max, + ) return x_adv_batch @@ -1171,7 +1218,14 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n # Compute the gradients of the objective function evaluated at the adversarial instance x_adv_tanh = original_to_tanh(x_adv, clip_min, clip_max, self._tanh_smoother) objective_loss_gradient = -self._loss_gradient( - z_logits, y, x, x_adv, x_adv_tanh, c_final, clip_min, clip_max, + z_logits, + y, + x, + x_adv, + x_adv_tanh, + c_final, + clip_min, + clip_max, ) perturbation_l1_norm = np.abs(x_adv - x) diff --git a/art/attacks/evasion/decision_tree_attack.py b/art/attacks/evasion/decision_tree_attack.py index 517f909f2e..0d6c88c226 100644 --- a/art/attacks/evasion/decision_tree_attack.py +++ b/art/attacks/evasion/decision_tree_attack.py @@ -45,7 +45,10 @@ class DecisionTreeAttack(EvasionAttack): _estimator_requirements = (ScikitlearnDecisionTreeClassifier,) def __init__( - self, classifier: ScikitlearnDecisionTreeClassifier, offset: float = 0.001, verbose: bool 
+        self,
+        classifier: ScikitlearnDecisionTreeClassifier,
+        offset: float = 0.001,
+        verbose: bool = True,
     ) -> None:
         """
         :param classifier: A trained scikit-learn decision tree model.
@@ -58,7 +61,10 @@ def __init__(
         self._check_params()

     def _df_subtree(
-        self, position: int, original_class: Union[int, np.ndarray], target: Optional[int] = None,
+        self,
+        position: int,
+        original_class: Union[int, np.ndarray],
+        target: Optional[int] = None,
     ) -> List[int]:
         """
         Search a decision tree for a mis-classifying instance.
@@ -126,14 +132,18 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                     adv_path = self._df_subtree(self.estimator.get_right_child(ancestor), legitimate_class)
                 else:
                     adv_path = self._df_subtree(
-                        self.estimator.get_right_child(ancestor), legitimate_class, y[index],
+                        self.estimator.get_right_child(ancestor),
+                        legitimate_class,
+                        y[index],
                     )
             else:  # search in left subtree
                 if y is None:
                     adv_path = self._df_subtree(self.estimator.get_left_child(ancestor), legitimate_class)
                 else:
                     adv_path = self._df_subtree(
-                        self.estimator.get_left_child(ancestor), legitimate_class, y[index],
+                        self.estimator.get_left_child(ancestor),
+                        legitimate_class,
+                        y[index],
                     )
             position = position - 1  # we are going the decision path upwards
         adv_path.append(ancestor)
@@ -150,7 +160,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                 x_adv[index][feature] = threshold + self.offset

         logger.info(
-            "Success rate of decision tree attack: %.2f%%", 100 * compute_success(self.estimator, x, y, x_adv),
+            "Success rate of decision tree attack: %.2f%%",
+            100 * compute_success(self.estimator, x, y, x_adv),
         )

         return x_adv
diff --git a/art/attacks/evasion/deepfool.py b/art/attacks/evasion/deepfool.py
index 05aff9e920..76f92a74e4 100644
--- a/art/attacks/evasion/deepfool.py
+++ b/art/attacks/evasion/deepfool.py
@@ -159,7 +159,13 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                 l_var = np.argmin(value, axis=1)
                 absolute1 = abs(f_diff[np.arange(len(f_diff)), l_var])
                 draddiff = grad_diff[np.arange(len(grad_diff)), l_var].reshape(len(grad_diff), -1)
-                pow1 = pow(np.linalg.norm(draddiff, axis=1), 2,) + tol
+                pow1 = (
+                    pow(
+                        np.linalg.norm(draddiff, axis=1),
+                        2,
+                    )
+                    + tol
+                )
                 r_var = absolute1 / pow1
                 r_var = r_var.reshape((-1,) + (1,) * (len(x.shape) - 1))
                 r_var = r_var * grad_diff[np.arange(len(grad_diff)), l_var]
diff --git a/art/attacks/evasion/dpatch.py b/art/attacks/evasion/dpatch.py
index 840703a92f..3077692afe 100644
--- a/art/attacks/evasion/dpatch.py
+++ b/art/attacks/evasion/dpatch.py
@@ -177,8 +177,16 @@ def generate(  # pylint: disable=W0221
                 target_dict = dict()
                 target_dict["boxes"] = np.asarray([[i_x_1, i_y_1, i_x_2, i_y_2]])
-                target_dict["labels"] = np.asarray([t_l,])
-                target_dict["scores"] = np.asarray([1.0,])
+                target_dict["labels"] = np.asarray(
+                    [
+                        t_l,
+                    ]
+                )
+                target_dict["scores"] = np.asarray(
+                    [
+                        1.0,
+                    ]
+                )

                 patch_target.append(target_dict)

@@ -232,7 +240,9 @@ def generate(  # pylint: disable=W0221
             if self.estimator.clip_values is not None:
                 self._patch = np.clip(
-                    self._patch, a_min=self.estimator.clip_values[0], a_max=self.estimator.clip_values[1],
+                    self._patch,
+                    a_min=self.estimator.clip_values[0],
+                    a_max=self.estimator.clip_values[1],
                 )

             patched_images, _ = self._augment_images_with_patch(
diff --git a/art/attacks/evasion/dpatch_robust.py b/art/attacks/evasion/dpatch_robust.py
index 5daeef4ca4..dd97f57136 100644
--- a/art/attacks/evasion/dpatch_robust.py
+++ b/art/attacks/evasion/dpatch_robust.py
@@ -199,7 +199,11 @@ def generate(self, x: np.ndarray, y: Optional[List[Dict[str, np.ndarray]]] = Non
                 x[i_batch_start:i_batch_end], y_batch, self._patch, channels_first=self.estimator.channels_first
             )

-            gradients = self.estimator.loss_gradient(x=patched_images, y=patch_target, standardise_output=True,)
+            gradients = self.estimator.loss_gradient(
+                x=patched_images,
+                y=patch_target,
+                standardise_output=True,
+            )

             gradients = self._untransform_gradients(
                 gradients, transforms, channels_first=self.estimator.channels_first
@@ -217,7 +221,9 @@ def generate(self, x: np.ndarray, y: Optional[List[Dict[str, np.ndarray]]] = Non
             if self.estimator.clip_values is not None:
                 self._patch = np.clip(
-                    self._patch, a_min=self.estimator.clip_values[0], a_max=self.estimator.clip_values[1],
+                    self._patch,
+                    a_min=self.estimator.clip_values[0],
+                    a_max=self.estimator.clip_values[1],
                 )

         return self._patch
@@ -348,7 +354,10 @@ def _augment_images_with_patch(
         return x_patch, patch_target, transformations

     def _untransform_gradients(
-        self, gradients: np.ndarray, transforms: Dict[str, Union[int, float]], channels_first: bool,
+        self,
+        gradients: np.ndarray,
+        transforms: Dict[str, Union[int, float]],
+        channels_first: bool,
     ) -> np.ndarray:
         """
         Revert transformation on gradients.
diff --git a/art/attacks/evasion/elastic_net.py b/art/attacks/evasion/elastic_net.py
index 9ab9de20dc..2739bf2368 100644
--- a/art/attacks/evasion/elastic_net.py
+++ b/art/attacks/evasion/elastic_net.py
@@ -130,7 +130,11 @@ def _loss(self, x: np.ndarray, x_adv: np.ndarray) -> tuple:
         return np.argmax(predictions, axis=1), l1dist, l2dist, endist

     def _gradient_of_loss(
-        self, target: np.ndarray, x: np.ndarray, x_adv: np.ndarray, c_weight: np.ndarray,
+        self,
+        target: np.ndarray,
+        x: np.ndarray,
+        x_adv: np.ndarray,
+        c_weight: np.ndarray,
     ) -> np.ndarray:
         """
         Compute the gradient of the loss function.
@@ -147,12 +151,14 @@ def _gradient_of_loss(
         if self.targeted:
             i_sub = np.argmax(target, axis=1)
             i_add = np.argmax(
-                predictions * (1 - target) + (np.min(predictions, axis=1) - 1)[:, np.newaxis] * target, axis=1,
+                predictions * (1 - target) + (np.min(predictions, axis=1) - 1)[:, np.newaxis] * target,
+                axis=1,
             )
         else:
             i_add = np.argmax(target, axis=1)
             i_sub = np.argmax(
-                predictions * (1 - target) + (np.min(predictions, axis=1) - 1)[:, np.newaxis] * target, axis=1,
+                predictions * (1 - target) + (np.min(predictions, axis=1) - 1)[:, np.newaxis] * target,
+                axis=1,
             )

         loss_gradient = self.estimator.class_gradient(x_adv, label=i_add)
@@ -254,7 +260,10 @@ def _generate_batch(self, x_batch: np.ndarray, y_batch: np.ndarray) -> np.ndarra
         # Start with a binary search
         for bss in range(self.binary_search_steps):
             logger.debug(
-                "Binary search step %i out of %i (c_mean==%f)", bss, self.binary_search_steps, np.mean(c_current),
+                "Binary search step %i out of %i (c_mean==%f)",
+                bss,
+                self.binary_search_steps,
+                np.mean(c_current),
             )

             # Run with 1 specific binary search step
diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py
index 16223caa67..2a2c30ba75 100644
--- a/art/attacks/evasion/fast_gradient.py
+++ b/art/attacks/evasion/fast_gradient.py
@@ -253,7 +253,16 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
             rate_best = None

             for _ in range(max(1, self.num_random_init)):
-                adv_x = self._compute(x, x, y, mask, self.eps, self.eps, self._project, self.num_random_init > 0,)
+                adv_x = self._compute(
+                    x,
+                    x,
+                    y,
+                    mask,
+                    self.eps,
+                    self.eps,
+                    self._project,
+                    self.num_random_init > 0,
+                )

                 if self.num_random_init > 1:
                     rate = 100 * compute_success(
@@ -298,7 +307,16 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                 logger.info("Using model predictions as correct labels for FGM.")
                 y = self.estimator.predict(x, batch_size=self.batch_size)

-            adv_x_best = self._compute(x, x, y, None, self.eps, self.eps, self._project, self.num_random_init > 0,)
+            adv_x_best = self._compute(
+                x,
+                x,
+                y,
+                None,
+                self.eps,
+                self.eps,
+                self._project,
+                self.num_random_init > 0,
+            )

         return adv_x_best
diff --git a/art/attacks/evasion/feature_adversaries/feature_adversaries_numpy.py b/art/attacks/evasion/feature_adversaries/feature_adversaries_numpy.py
index 6e1fc25e57..97ac2ba0d3 100644
--- a/art/attacks/evasion/feature_adversaries/feature_adversaries_numpy.py
+++ b/art/attacks/evasion/feature_adversaries/feature_adversaries_numpy.py
@@ -153,10 +153,18 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
         def func(x_i):
             x_i = x_i.astype(x.dtype)
             source_representation = self.estimator.get_activations(
-                x=x_i.reshape(-1, *self.estimator.input_shape), layer=self.layer, batch_size=self.batch_size,
+                x=x_i.reshape(-1, *self.estimator.input_shape),
+                layer=self.layer,
+                batch_size=self.batch_size,
             )

-            n = norm(source_representation.flatten() - guide_representation.flatten(), ord=2,) ** 2
+            n = (
+                norm(
+                    source_representation.flatten() - guide_representation.flatten(),
+                    ord=2,
+                )
+                ** 2
+            )

             return n
diff --git a/art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py b/art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py
index 1a7b177271..e4d3b7d5d9 100644
--- a/art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py
+++ b/art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py
@@ -155,7 +155,9 @@ def loss_fn(source_orig, source_adv, guide):
         # optimize soft constraint problem with chosen optimizer
         opt = self.optimizer(**self._optimizer_kwargs)  # type: ignore
         perturbation = tf.Variable(
-            tf.zeros_like(adv), trainable=True, constraint=lambda x: tf.clip_by_value(x, -self.delta, self.delta),
+            tf.zeros_like(adv),
+            trainable=True,
+            constraint=lambda x: tf.clip_by_value(x, -self.delta, self.delta),
         )

         for _ in trange(self.max_iter, desc="Feature Adversaries TensorFlow v2", disable=not self.verbose):
diff --git a/art/attacks/evasion/hclu.py b/art/attacks/evasion/hclu.py
index 7706e0fbf2..aee0270a2a 100644
--- a/art/attacks/evasion/hclu.py
+++ b/art/attacks/evasion/hclu.py
@@ -114,9 +114,16 @@ def constraint_unc(x, args):  # constraint for uncertainty
             constr_unc = {"type": "ineq", "fun": constraint_unc, "args": (init_args,)}
             args = {"args": init_args, "orig": x[i].reshape(-1)}
             # finally, run optimization
-            x_adv[i] = minimize(minfun, x_adv[i], args=args, bounds=bounds, constraints=[constr_conf, constr_unc],)["x"]
+            x_adv[i] = minimize(
+                minfun,
+                x_adv[i],
+                args=args,
+                bounds=bounds,
+                constraints=[constr_conf, constr_unc],
+            )["x"]
         logger.info(
-            "Success rate of HCLU attack: %.2f%%", 100 * compute_success(self.estimator, x, y, x_adv),
+            "Success rate of HCLU attack: %.2f%%",
+            100 * compute_success(self.estimator, x, y, x_adv),
         )
         return x_adv
diff --git a/art/attacks/evasion/hop_skip_jump.py b/art/attacks/evasion/hop_skip_jump.py
index 5e11e6a79e..7276ad400d 100644
--- a/art/attacks/evasion/hop_skip_jump.py
+++ b/art/attacks/evasion/hop_skip_jump.py
@@ -306,7 +306,8 @@ def _init_sample(
                 random_img = random_img * mask + x * (1 - mask)

                 random_class = np.argmax(
-                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size), axis=1,
+                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size),
+                    axis=1,
                 )[0]

                 if random_class == y:
@@ -340,7 +341,8 @@ def _init_sample(
                 random_img = random_img * mask + x * (1 - mask)

                 random_class = np.argmax(
-                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size), axis=1,
+                    self.estimator.predict(np.array([random_img]), batch_size=self.batch_size),
+                    axis=1,
                 )[0]

                 if random_class != y_p:
@@ -392,7 +394,10 @@ def _attack(
         for _ in range(self.max_iter):
             # First compute delta
             delta = self._compute_delta(
-                current_sample=current_sample, original_sample=original_sample, clip_min=clip_min, clip_max=clip_max,
+                current_sample=current_sample,
+                original_sample=original_sample,
+                clip_min=clip_min,
+                clip_max=clip_max,
             )

             # Then run binary search
@@ -431,7 +436,10 @@ def _attack(
                     epsilon /= 2.0
                     potential_sample = current_sample + epsilon * update
                     success = self._adversarial_satisfactory(
-                        samples=potential_sample[None], target=target, clip_min=clip_min, clip_max=clip_max,
+                        samples=potential_sample[None],
+                        target=target,
+                        clip_min=clip_min,
+                        clip_max=clip_max,
                     )

             # Update current sample
@@ -490,24 +498,37 @@ def _binary_search(
             # Interpolation point
             alpha = (upper_bound + lower_bound) / 2.0
             interpolated_sample = self._interpolate(
-                current_sample=current_sample, original_sample=original_sample, alpha=alpha, norm=norm,
+                current_sample=current_sample,
+                original_sample=original_sample,
+                alpha=alpha,
+                norm=norm,
            )

             # Update upper_bound and lower_bound
             satisfied = self._adversarial_satisfactory(
-                samples=interpolated_sample[None], target=target, clip_min=clip_min, clip_max=clip_max,
+                samples=interpolated_sample[None],
+                target=target,
+                clip_min=clip_min,
+                clip_max=clip_max,
             )[0]
             lower_bound = np.where(satisfied == 0, alpha, lower_bound)
             upper_bound = np.where(satisfied == 1, alpha, upper_bound)

         result = self._interpolate(
-            current_sample=current_sample, original_sample=original_sample, alpha=upper_bound, norm=norm,
+            current_sample=current_sample,
+            original_sample=original_sample,
+            alpha=upper_bound,
+            norm=norm,
         )

         return result

     def _compute_delta(
-        self, current_sample: np.ndarray, original_sample: np.ndarray, clip_min: float, clip_max: float,
+        self,
+        current_sample: np.ndarray,
+        original_sample: np.ndarray,
+        clip_min: float,
+        clip_max: float,
     ) -> float:
         """
         Compute the delta parameter.
@@ -569,7 +590,11 @@ def _compute_update(
         # Normalize random noise to fit into the range of input data
         rnd_noise = rnd_noise / np.sqrt(
-            np.sum(rnd_noise ** 2, axis=tuple(range(len(rnd_noise_shape)))[1:], keepdims=True,)
+            np.sum(
+                rnd_noise ** 2,
+                axis=tuple(range(len(rnd_noise_shape)))[1:],
+                keepdims=True,
+            )
         )
         eval_samples = np.clip(current_sample + delta * rnd_noise, clip_min, clip_max)
         rnd_noise = (eval_samples - current_sample) / delta
diff --git a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py
index 5049da24cc..fc69fe8e11 100644
--- a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py
+++ b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py
@@ -472,7 +472,9 @@ def _forward_1st_stage(
         # Compute loss and decoded output
         loss, decoded_output = self.estimator.compute_loss_and_decoded_output(
-            masked_adv_input=masked_adv_input, original_output=original_output, real_lengths=real_lengths,
+            masked_adv_input=masked_adv_input,
+            original_output=original_output,
+            real_lengths=real_lengths,
         )

         return loss, local_delta, decoded_output, masked_adv_input, local_delta_rescale
diff --git a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py
index 54db767d0a..8e3b1883e7 100644
--- a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py
+++ b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py
@@ -141,7 +141,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
             y = get_labels_np_array(self.estimator.predict(x, batch_size=self.batch_size))

         dataset = torch.utils.data.TensorDataset(
-            torch.from_numpy(x.astype(ART_NUMPY_DTYPE)), torch.from_numpy(y.astype(ART_NUMPY_DTYPE)),
+            torch.from_numpy(x.astype(ART_NUMPY_DTYPE)),
+            torch.from_numpy(y.astype(ART_NUMPY_DTYPE)),
         )

         data_loader = torch.utils.data.DataLoader(
@@ -176,12 +177,21 @@ def _generate_batch(self, x: "torch.Tensor", y: "torch.Tensor") -> np.ndarray:
         x_adv = torch.clone(x)

         for _ in range(self.max_iter):
-            x_adv = self._compute_torch(x_adv, x, y, self.eps_step,)
+            x_adv = self._compute_torch(
+                x_adv,
+                x,
+                y,
+                self.eps_step,
+            )

         return x_adv.cpu().detach().numpy()

     def _compute_torch(
-        self, x_adv: "torch.Tensor", x: "torch.Tensor", y: "torch.Tensor", eps_step: float,
+        self,
+        x_adv: "torch.Tensor",
+        x: "torch.Tensor",
+        y: "torch.Tensor",
+        eps_step: float,
     ) -> "torch.Tensor":
         """
         Compute adversarial examples for one iteration.
diff --git a/art/attacks/evasion/pe_malware_attack.py b/art/attacks/evasion/pe_malware_attack.py
index 2be7aad42b..2d27b1dfa3 100644
--- a/art/attacks/evasion/pe_malware_attack.py
+++ b/art/attacks/evasion/pe_malware_attack.py
@@ -168,7 +168,10 @@ def initialise_sample(
         return x

     def check_valid_size(
-        self, y: np.ndarray, sample_sizes: np.ndarray, append_perturbation_size: np.ndarray,
+        self,
+        y: np.ndarray,
+        sample_sizes: np.ndarray,
+        append_perturbation_size: np.ndarray,
     ) -> np.ndarray:
         """
         Checks that we can append the l0 perturbation to the malware sample and not exceed the
@@ -367,7 +370,10 @@ def pull_out_adversarial_malware(
         return adv_x, adv_y, adv_sample_sizes

     def compute_perturbation_regions(
-        self, input_perturbation_size: np.ndarray, input_perturb_sizes: List[List[int]], automatically_append: bool,
+        self,
+        input_perturbation_size: np.ndarray,
+        input_perturb_sizes: List[List[int]],
+        automatically_append: bool,
     ) -> Tuple[np.ndarray, List[List[int]]]:
         """
         Based on the l0 budget and the provided allowable perturbation regions we iteratively mark regions of the PE
diff --git a/art/attacks/evasion/pixel_threshold.py b/art/attacks/evasion/pixel_threshold.py
index 3e4d2911fe..3117fd9c39 100644
--- a/art/attacks/evasion/pixel_threshold.py
+++ b/art/attacks/evasion/pixel_threshold.py
@@ -473,7 +473,11 @@ def _perturb_image(self, x: np.ndarray, img: np.ndarray) -> np.ndarray:
         x = x.astype(int)
         for adv, image in zip(x, imgs):
             for count, (i, j, k) in enumerate(
-                product(range(image.shape[-3]), range(image.shape[-2]), range(image.shape[-1]),)
+                product(
+                    range(image.shape[-3]),
+                    range(image.shape[-2]),
+                    range(image.shape[-1]),
+                )
             ):
                 image[i, j, k] = adv[count]
         return imgs
@@ -1210,7 +1214,10 @@ def solve(self):
             if (
                 self.callback
-                and self.callback(self._scale_parameters(self.population[0]), convergence=self.tol / convergence,)
+                and self.callback(
+                    self._scale_parameters(self.population[0]),
+                    convergence=self.tol / convergence,
+                )
                 is True
             ):
                 warning_flag = True
@@ -1235,7 +1242,13 @@ def solve(self):
             )

         if self.polish:
-            result = minimize(self.func, np.copy(de_result.x), method="L-BFGS-B", bounds=self.limits.T, args=self.args,)
+            result = minimize(
+                self.func,
+                np.copy(de_result.x),
+                method="L-BFGS-B",
+                bounds=self.limits.T,
+                args=self.args,
+            )
             self._nfev += result.nfev
             de_result.nfev = self._nfev
diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
index f5715c3b97..cdb52ab616 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py
@@ -165,7 +165,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
         else:
             dataset = torch.utils.data.TensorDataset(
-                torch.from_numpy(x.astype(ART_NUMPY_DTYPE)), torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)),
+                torch.from_numpy(x.astype(ART_NUMPY_DTYPE)),
+                torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)),
             )

             data_loader = torch.utils.data.DataLoader(
@@ -264,7 +265,13 @@ def _generate_batch(
         for i_max_iter in range(self.max_iter):
             self._i_max_iter = i_max_iter
             adv_x = self._compute_torch(
-                adv_x, inputs, targets, mask, eps, eps_step, self.num_random_init > 0 and i_max_iter == 0,
+                adv_x,
+                inputs,
+                targets,
+                mask,
+                eps,
+                eps_step,
+                self.num_random_init > 0 and i_max_iter == 0,
             )

         return adv_x.cpu().detach().numpy()
@@ -461,10 +468,13 @@ def _projection(
                     "The parameter `eps` of type `np.ndarray` is not supported to use with norm 2."
                 )

-            values_tmp = values_tmp * torch.min(
-                torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device),
-                eps / (torch.norm(values_tmp, p=2, dim=1) + tol),
-            ).unsqueeze_(-1)
+            values_tmp = (
+                values_tmp
+                * torch.min(
+                    torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device),
+                    eps / (torch.norm(values_tmp, p=2, dim=1) + tol),
+                ).unsqueeze_(-1)
+            )

         elif norm_p == 1:
             if isinstance(eps, np.ndarray):
@@ -472,10 +482,13 @@ def _projection(
                     "The parameter `eps` of type `np.ndarray` is not supported to use with norm 1."
                 )

-            values_tmp = values_tmp * torch.min(
-                torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device),
-                eps / (torch.norm(values_tmp, p=1, dim=1) + tol),
-            ).unsqueeze_(-1)
+            values_tmp = (
+                values_tmp
+                * torch.min(
+                    torch.tensor([1.0], dtype=torch.float32).to(self.estimator.device),
+                    eps / (torch.norm(values_tmp, p=1, dim=1) + tol),
+                ).unsqueeze_(-1)
+            )

         elif norm_p in [np.inf, "inf"]:
             if isinstance(eps, np.ndarray):
diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
index 719e706e67..f58068ded7 100644
--- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
+++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py
@@ -148,7 +148,11 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
             # those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is.
             if len(mask.shape) == len(x.shape):
                 dataset = tf.data.Dataset.from_tensor_slices(
-                    (x.astype(ART_NUMPY_DTYPE), targets.astype(ART_NUMPY_DTYPE), mask.astype(ART_NUMPY_DTYPE),)
+                    (
+                        x.astype(ART_NUMPY_DTYPE),
+                        targets.astype(ART_NUMPY_DTYPE),
+                        mask.astype(ART_NUMPY_DTYPE),
+                    )
                 ).batch(self.batch_size, drop_remainder=False)

             else:
@@ -162,7 +166,10 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
         else:
             dataset = tf.data.Dataset.from_tensor_slices(
-                (x.astype(ART_NUMPY_DTYPE), targets.astype(ART_NUMPY_DTYPE),)
+                (
+                    x.astype(ART_NUMPY_DTYPE),
+                    targets.astype(ART_NUMPY_DTYPE),
+                )
             ).batch(self.batch_size, drop_remainder=False)

         # Start to compute adversarial examples
@@ -252,7 +259,13 @@ def _generate_batch(
         for i_max_iter in range(self.max_iter):
             self._i_max_iter = i_max_iter
             adv_x = self._compute_tf(
-                adv_x, x, targets, mask, eps, eps_step, self.num_random_init > 0 and i_max_iter == 0,
+                adv_x,
+                x,
+                targets,
+                mask,
+                eps,
+                eps_step,
+                self.num_random_init > 0 and i_max_iter == 0,
             )

         return adv_x
@@ -306,7 +319,9 @@ def _compute_perturbation(  # pylint: disable=W0221
                 for key, value in losses.items():
                     self.summary_writer.add_scalar(
-                        "loss/{}/batch-{}".format(key, self._batch_id), np.mean(value), global_step=self._i_max_iter,
+                        "loss/{}/batch-{}".format(key, self._batch_id),
+                        np.mean(value),
+                        global_step=self._i_max_iter,
                     )

         # Check for NaN before normalisation an replace with 0
diff --git a/art/attacks/evasion/saliency_map.py b/art/attacks/evasion/saliency_map.py
index 2164616d52..db7645cfbf 100644
--- a/art/attacks/evasion/saliency_map.py
+++ b/art/attacks/evasion/saliency_map.py
@@ -153,10 +153,12 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
             # Update adversarial examples
             tmp_batch = batch[active_indices]
             tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] = clip_func(
-                clip_value, tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] + self.theta,
+                clip_value,
+                tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] + self.theta,
             )
             tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] = clip_func(
-                clip_value, tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] + self.theta,
+                clip_value,
+                tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] + self.theta,
             )
             batch[active_indices] = tmp_batch

@@ -171,7 +173,10 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                 batch[active_indices] = tmp_batch

             # Recompute model prediction
-            current_pred = np.argmax(self.estimator.predict(np.reshape(batch, [batch.shape[0]] + dims)), axis=1,)
+            current_pred = np.argmax(
+                self.estimator.predict(np.reshape(batch, [batch.shape[0]] + dims)),
+                axis=1,
+            )

             # Update active_indices
             active_indices = np.where(
diff --git a/art/attacks/evasion/shapeshifter.py b/art/attacks/evasion/shapeshifter.py
index b5f997c197..9b2316b939 100644
--- a/art/attacks/evasion/shapeshifter.py
+++ b/art/attacks/evasion/shapeshifter.py
@@ -663,7 +663,10 @@ def _create_optimizer(self) -> "Optimizer":
         return optimizer

     def _create_attack_loss(
-        self, initial_input: "Tensor", current_value: "Tensor", custom_loss: Optional["Tensor"] = None,
+        self,
+        initial_input: "Tensor",
+        current_value: "Tensor",
+        custom_loss: Optional["Tensor"] = None,
     ) -> "Tensor":
         """
         Create the loss tensor of this attack.
@@ -892,7 +895,12 @@ def _create_rpn_loss(self) -> "Tensor":
         # Compute partial loss
         partial_loss = tf.add_n(
-            [weight_rpn_background_loss, weight_rpn_foreground_loss, weight_rpn_cw_loss,], name="partial_rpn_loss",
+            [
+                weight_rpn_background_loss,
+                weight_rpn_foreground_loss,
+                weight_rpn_cw_loss,
+            ],
+            name="partial_rpn_loss",
         )

         return partial_loss
diff --git a/art/attacks/evasion/spatial_transformation.py b/art/attacks/evasion/spatial_transformation.py
index 7a781a70b4..b74b1094e6 100644
--- a/art/attacks/evasion/spatial_transformation.py
+++ b/art/attacks/evasion/spatial_transformation.py
@@ -123,11 +123,23 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
             grid_trans_x = [
                 int(round(g))
-                for g in list(np.linspace(-max_num_pixel_trans_x, max_num_pixel_trans_x, num=self.num_translations,))
+                for g in list(
+                    np.linspace(
+                        -max_num_pixel_trans_x,
+                        max_num_pixel_trans_x,
+                        num=self.num_translations,
+                    )
+                )
             ]
             grid_trans_y = [
                 int(round(g))
-                for g in list(np.linspace(-max_num_pixel_trans_y, max_num_pixel_trans_y, num=self.num_translations,))
+                for g in list(
+                    np.linspace(
+                        -max_num_pixel_trans_y,
+                        max_num_pixel_trans_y,
+                        num=self.num_translations,
+                    )
+                )
             ]
             grid_rot = list(np.linspace(-self.max_rotation, self.max_rotation, num=self.num_rotations))
@@ -180,7 +192,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
                 self.attack_rot = rot

         logger.info(
-            "Success rate of spatial transformation attack: %.2f%%", 100 * self.fooling_rate,
+            "Success rate of spatial transformation attack: %.2f%%",
+            100 * self.fooling_rate,
         )
         logger.info("Attack-translation in x: %.2f%%", self.attack_trans_x)
         logger.info("Attack-translation in y: %.2f%%", self.attack_trans_y)
@@ -203,7 +216,10 @@ def _perturb(self, x: np.ndarray, trans_x: int, trans_y: int, rot: float) -> np.
if self.estimator.clip_values is not None: np.clip( - x_adv, self.estimator.clip_values[0], self.estimator.clip_values[1], out=x_adv, + x_adv, + self.estimator.clip_values[0], + self.estimator.clip_values[1], + out=x_adv, ) return x_adv diff --git a/art/attacks/evasion/virtual_adversarial.py b/art/attacks/evasion/virtual_adversarial.py index 0a88f528a1..95787fe528 100644 --- a/art/attacks/evasion/virtual_adversarial.py +++ b/art/attacks/evasion/virtual_adversarial.py @@ -133,7 +133,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n from scipy.stats import entropy kl_div1 = entropy( - np.transpose(preds_rescaled[batch_index_1:batch_index_2]), np.transpose(preds_new_rescaled), + np.transpose(preds_rescaled[batch_index_1:batch_index_2]), + np.transpose(preds_new_rescaled), ) var_d_new = np.zeros(var_d.shape).astype(ART_NUMPY_DTYPE) @@ -149,7 +150,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n preds_new_rescaled = preds_new kl_div2 = entropy( - np.transpose(preds_rescaled[batch_index_1:batch_index_2]), np.transpose(preds_new_rescaled), + np.transpose(preds_rescaled[batch_index_1:batch_index_2]), + np.transpose(preds_new_rescaled), ) var_d_new[:, current_index] = (kl_div2 - kl_div1) / self.finite_diff var_d[:, current_index] -= self.finite_diff diff --git a/art/attacks/evasion/wasserstein.py b/art/attacks/evasion/wasserstein.py index 91b5d51bf2..90dc46a8a3 100644 --- a/art/attacks/evasion/wasserstein.py +++ b/art/attacks/evasion/wasserstein.py @@ -525,7 +525,15 @@ def _projected_sinkhorn( # Check for convergence next_convergence = self._projected_sinkhorn_evaluation( - x, x_init, alpha, exp_alpha, beta, exp_beta, psi, var_k, eps, + x, + x_init, + alpha, + exp_alpha, + beta, + exp_beta, + psi, + var_k, + eps, ) if (np.abs(convergence - next_convergence) <= 1e-4 + 1e-4 * np.abs(next_convergence)).all(): diff --git a/art/attacks/evasion/zoo.py b/art/attacks/evasion/zoo.py index f25c04c4f2..26199a8c5b 100644 --- a/art/attacks/evasion/zoo.py +++ b/art/attacks/evasion/zoo.py @@ -187,7 +187,10 @@ def _loss( ] preds = self.estimator.predict(np.array(zoom(x_adv, zoom=ratios)), batch_size=self.batch_size) z_target = np.sum(preds * target, axis=1) - z_other = np.max(preds * (1 - target) + (np.min(preds, axis=1) - 1)[:, np.newaxis] * target, axis=1,) + z_other = np.max( + preds * (1 - target) + (np.min(preds, axis=1) - 1)[:, np.newaxis] * target, + axis=1, + ) if self.targeted: # If targeted, optimize for making the target class most likely @@ -266,7 +269,10 @@ def _generate_batch(self, x_batch: np.ndarray, y_batch: np.ndarray) -> np.ndarra # Start with a binary search for bss in range(self.binary_search_steps): logger.debug( - "Binary search step %i out of %i (c_mean==%f)", bss, self.binary_search_steps, np.mean(c_current), + "Binary search step %i out of %i (c_mean==%f)", + bss, + self.binary_search_steps, + np.mean(c_current), ) # Run with 1 specific binary search step @@ -419,12 +425,22 @@ def compare(object1, object2): if not self.estimator.channels_first: best_attack = zoom( best_attack, - [1, int(x_batch.shape[1]) / best_attack.shape[1], int(x_batch.shape[2]) / best_attack.shape[2], 1,], + [ + 1, + int(x_batch.shape[1]) / best_attack.shape[1], + int(x_batch.shape[2]) / best_attack.shape[2], + 1, + ], ) else: best_attack = zoom( best_attack, - [1, 1, int(x_batch.shape[2]) / best_attack.shape[2], int(x_batch.shape[2]) / best_attack.shape[3],], + [ + 1, + 1, + int(x_batch.shape[2]) / best_attack.shape[2], + int(x_batch.shape[2]) 
/ best_attack.shape[3], + ], ) return best_dist, best_label, best_attack @@ -474,7 +490,10 @@ def _optimizer(self, x: np.ndarray, targets: np.ndarray, c_batch: np.ndarray) -> expanded_targets = np.repeat(targets, 2 * self.nb_parallel, axis=0).reshape((-1,) + targets.shape[1:]) expanded_c = np.repeat(c_batch, 2 * self.nb_parallel) _, _, loss = self._loss( - expanded_x, expanded_x + coord_batch.reshape(expanded_x.shape), expanded_targets, expanded_c, + expanded_x, + expanded_x + coord_batch.reshape(expanded_x.shape), + expanded_targets, + expanded_c, ) self._current_noise = self._optimizer_adam_coordinate( loss, @@ -560,7 +579,15 @@ def _resize_image(self, x: np.ndarray, size_x: int, size_y: int, reset: bool = F else: self._current_noise = np.zeros(x.shape, dtype=ART_NUMPY_DTYPE) else: - resized_x = zoom(x, (1, dims[1] / x.shape[1], dims[2] / x.shape[2], dims[3] / x.shape[3],),) + resized_x = zoom( + x, + ( + 1, + dims[1] / x.shape[1], + dims[2] / x.shape[2], + dims[3] / x.shape[3], + ), + ) self._current_noise = np.zeros(dims, dtype=ART_NUMPY_DTYPE) self._sample_prob = np.ones(nb_vars, dtype=ART_NUMPY_DTYPE) / nb_vars else: @@ -609,7 +636,9 @@ def _max_pooling(image: np.ndarray, kernel_size: int) -> np.ndarray: for i in range(0, image.shape[1], kernel_size): for j in range(0, image.shape[2], kernel_size): img_pool[:, i : i + kernel_size, j : j + kernel_size] = np.max( - image[:, i : i + kernel_size, j : j + kernel_size], axis=(1, 2), keepdims=True, + image[:, i : i + kernel_size, j : j + kernel_size], + axis=(1, 2), + keepdims=True, ) return img_pool diff --git a/art/attacks/extraction/copycat_cnn.py b/art/attacks/extraction/copycat_cnn.py index 607910cb24..3ac93f0736 100644 --- a/art/attacks/extraction/copycat_cnn.py +++ b/art/attacks/extraction/copycat_cnn.py @@ -117,7 +117,10 @@ def extract(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> "C # Train the thieved classifier thieved_classifier.fit( # type: ignore - x=selected_x, y=fake_labels, batch_size=self.batch_size_fit, nb_epochs=self.nb_epochs, + x=selected_x, + y=fake_labels, + batch_size=self.batch_size_fit, + nb_epochs=self.nb_epochs, ) return thieved_classifier # type: ignore diff --git a/art/attacks/extraction/functionally_equivalent_extraction.py b/art/attacks/extraction/functionally_equivalent_extraction.py index e2df61a84d..64cbcd469f 100644 --- a/art/attacks/extraction/functionally_equivalent_extraction.py +++ b/art/attacks/extraction/functionally_equivalent_extraction.py @@ -117,7 +117,10 @@ def extract( # pylint: disable=W0221 :return: ART :class:`.BlackBoxClassifier` of the extracted model. """ self._critical_point_search( - delta_0=delta_0, fraction_true=fraction_true, rel_diff_slope=rel_diff_slope, rel_diff_value=rel_diff_value, + delta_0=delta_0, + fraction_true=fraction_true, + rel_diff_slope=rel_diff_slope, + rel_diff_value=rel_diff_value, ) self._weight_recovery( delta_init_value=delta_init_value, @@ -173,7 +176,11 @@ def _get_x(self, var_t: float) -> np.ndarray: return self.vector_u + var_t * self.vector_v def _critical_point_search( - self, delta_0: float, fraction_true: float, rel_diff_slope: float, rel_diff_value: float, + self, + delta_0: float, + fraction_true: float, + rel_diff_slope: float, + rel_diff_value: float, ) -> None: """ Search for critical points. 
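The wrapping pattern applied throughout these hunks is black's "magic trailing comma": a call or signature that already ends in a trailing comma is exploded to one argument per line, whether or not it would fit on one line. A minimal sketch of the rule, assuming black is the formatter in use here and a 120-character line limit (both are assumptions, not stated in the patch):

import black

# A call with a trailing comma, as on the left-hand side of these hunks.
src = (
    "thieved_classifier.fit(x=selected_x, y=fake_labels, "
    "batch_size=self.batch_size_fit, nb_epochs=self.nb_epochs,)\n"
)

# black "explodes" it to one keyword argument per line, matching the
# right-hand side of the copycat_cnn.py hunk above.
print(black.format_str(src, mode=black.Mode(line_length=120)))
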
@@ -247,7 +254,12 @@ def _critical_point_search( ) def _weight_recovery( - self, delta_init_value: float, delta_value_max: float, d2_min: float, d_step: float, delta_sign: float, + self, + delta_init_value: float, + delta_value_max: float, + d2_min: float, + d_step: float, + delta_sign: float, ) -> None: """ Recover the weights and biases of the first layer. @@ -458,12 +470,19 @@ def f_w_1_b_1(w_1_b_1_i): model.compile( loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True), - optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001,), + optimizer=tf.keras.optimizers.Adam( + learning_rate=0.0001, + ), metrics=["accuracy"], ) model.fit( - x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test), + x_train, + y_train, + batch_size=batch_size, + epochs=epochs, + verbose=1, + validation_data=(x_test, y_test), ) model.save("./model.h5") diff --git a/art/attacks/extraction/knockoff_nets.py b/art/attacks/extraction/knockoff_nets.py index 335ba6cd69..c5b50169ab 100644 --- a/art/attacks/extraction/knockoff_nets.py +++ b/art/attacks/extraction/knockoff_nets.py @@ -151,7 +151,11 @@ def _random_extraction(self, x: np.ndarray, thieved_classifier: "CLASSIFIER_TYPE # Train the thieved classifier thieved_classifier.fit( - x=selected_x, y=fake_labels, batch_size=self.batch_size_fit, nb_epochs=self.nb_epochs, verbose=0, + x=selected_x, + y=fake_labels, + batch_size=self.batch_size_fit, + nb_epochs=self.nb_epochs, + verbose=0, ) return thieved_classifier @@ -235,7 +239,11 @@ def _adaptive_extraction( # Train the thieved classifier thieved_classifier.fit( - x=np.array([sampled_x]), y=fake_label, batch_size=self.batch_size_fit, nb_epochs=1, verbose=0, + x=np.array([sampled_x]), + y=fake_label, + batch_size=self.batch_size_fit, + nb_epochs=1, + verbose=0, ) # Test new labels diff --git a/art/attacks/inference/attribute_inference/baseline.py b/art/attacks/inference/attribute_inference/baseline.py index 7f1b6a3f7a..b09488719f 100644 --- a/art/attacks/inference/attribute_inference/baseline.py +++ b/art/attacks/inference/attribute_inference/baseline.py @@ -47,7 +47,9 @@ class AttributeInferenceBaseline(AttributeInferenceAttack): _estimator_requirements = () def __init__( - self, attack_model: Optional["CLASSIFIER_TYPE"] = None, attack_feature: Union[int, slice] = 0, + self, + attack_model: Optional["CLASSIFIER_TYPE"] = None, + attack_feature: Union[int, slice] = 0, ): """ Create an AttributeInferenceBaseline attack instance. 
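The Copycat CNN and Knockoff Nets hunks above both reformat the same extraction step: train the thieved classifier on victim-labelled queries. A compressed sketch of that loop as an illustration only; `victim` and `thief` stand for ART classifiers, and the pool-sampling and query steps are assumptions (only the `fit` call mirrors the hunks):

import numpy as np

def random_extraction(victim, thief, x_pool, nb_stolen=1000, batch_size_fit=64, nb_epochs=10):
    # Sample a random query set from the unlabeled pool (assumed step).
    idx = np.random.permutation(len(x_pool))[:nb_stolen]
    selected_x = x_pool[idx]
    # Label the queries with the victim's predictions (assumed step).
    fake_labels = victim.predict(selected_x)
    # Train the thieved classifier on the stolen labels; this is the
    # fit call reformatted in the Copycat/Knockoff hunks above.
    thief.fit(x=selected_x, y=fake_labels, batch_size=batch_size_fit, nb_epochs=nb_epochs)
    return thief
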
diff --git a/art/attacks/inference/membership_inference/black_box.py b/art/attacks/inference/membership_inference/black_box.py index 79a8659b6e..9c33bac9c9 100644 --- a/art/attacks/inference/membership_inference/black_box.py +++ b/art/attacks/inference/membership_inference/black_box.py @@ -121,10 +121,15 @@ def __init__(self, num_classes, num_features=None): ) self.labels = nn.Sequential( - nn.Linear(self.num_classes, 256), nn.ReLU(), nn.Linear(256, 64), nn.ReLU(), + nn.Linear(self.num_classes, 256), + nn.ReLU(), + nn.Linear(256, 64), + nn.ReLU(), ) - self.combine = nn.Sequential(nn.Linear(64 * 2, 1),) + self.combine = nn.Sequential( + nn.Linear(64 * 2, 1), + ) self.output = nn.Sigmoid() diff --git a/art/attacks/inference/membership_inference/shadow_models.py b/art/attacks/inference/membership_inference/shadow_models.py index deab1a738d..b01e8be281 100644 --- a/art/attacks/inference/membership_inference/shadow_models.py +++ b/art/attacks/inference/membership_inference/shadow_models.py @@ -61,7 +61,10 @@ def __init__( self._rng = np.random.default_rng(seed=random_state) def generate_shadow_dataset( - self, x: np.ndarray, y: np.ndarray, member_ratio: float = 0.5, + self, + x: np.ndarray, + y: np.ndarray, + member_ratio: float = 0.5, ) -> Tuple[Tuple[np.ndarray, np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray]]: """ Generates a shadow dataset (member and nonmember samples and their corresponding model predictions) by splitting diff --git a/art/attacks/poisoning/poisoning_attack_svm.py b/art/attacks/poisoning/poisoning_attack_svm.py index 02b3324b81..405abf6c60 100644 --- a/art/attacks/poisoning/poisoning_attack_svm.py +++ b/art/attacks/poisoning/poisoning_attack_svm.py @@ -86,7 +86,8 @@ def __init__( if isinstance(self.estimator.model, LinearSVC): self._estimator = ScikitlearnSVC( - model=SVC(C=self.estimator.model.C, kernel="linear"), clip_values=self.estimator.clip_values, + model=SVC(C=self.estimator.model.C, kernel="linear"), + clip_values=self.estimator.clip_values, ) self.estimator.fit(x_train, y_train) elif not isinstance(self.estimator.model, SVC): @@ -235,7 +236,9 @@ def attack_gradient(self, attack_point: np.ndarray, tol: float = 0.0001) -> np.n q_ks = art_model.q_submatrix(np.array([x_k]), support_vectors) m_k = (1.0 / zeta) * np.matmul(q_ks, zeta * qss_inv - np.matmul(nu_k, nu_k.T)) + np.matmul(y_k, nu_k.T) d_q_sc = np.fromfunction( - lambda i: art_model._get_kernel_gradient_sv(i, attack_point), (len(support_vectors),), dtype=int, + lambda i: art_model._get_kernel_gradient_sv(i, attack_point), + (len(support_vectors),), + dtype=int, ) d_q_kc = art_model._kernel_grad(x_k, attack_point) grad += (np.matmul(m_k, d_q_sc) + d_q_kc) * alpha_c diff --git a/art/defences/detector/evasion/detector.py b/art/defences/detector/evasion/detector.py index 5c27cf8c46..a346b572ec 100644 --- a/art/defences/detector/evasion/detector.py +++ b/art/defences/detector/evasion/detector.py @@ -193,7 +193,10 @@ class BinaryActivationDetector( ) def __init__( - self, classifier: "ClassifierNeuralNetwork", detector: "ClassifierNeuralNetwork", layer: Union[int, str], + self, + classifier: "ClassifierNeuralNetwork", + detector: "ClassifierNeuralNetwork", + layer: Union[int, str], ) -> None: # lgtm [py/similar-function] """ Create a `BinaryActivationDetector` instance which performs binary classification on activation information. 
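For orientation, the black_box.py hunk above wires its membership-inference attack model from small `nn.Sequential` blocks. A self-contained sketch of that architecture; the `features` branch and the `forward` wiring are assumptions inferred from the visible layer sizes (only `labels`, `combine`, and `output` appear in the hunk):

import torch
import torch.nn as nn

class MembershipAttackModel(nn.Module):
    def __init__(self, num_classes: int):
        super().__init__()
        # Assumed branch embedding the victim's prediction vector.
        self.features = nn.Sequential(nn.Linear(num_classes, 256), nn.ReLU(), nn.Linear(256, 64), nn.ReLU())
        # Branch embedding the true label, as in the hunk.
        self.labels = nn.Sequential(nn.Linear(num_classes, 256), nn.ReLU(), nn.Linear(256, 64), nn.ReLU())
        # Combiner over the two 64-dimensional embeddings, as in the hunk.
        self.combine = nn.Sequential(nn.Linear(64 * 2, 1))
        self.output = nn.Sigmoid()

    def forward(self, pred: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
        joint = torch.cat((self.features(pred), self.labels(label)), dim=1)
        return self.output(self.combine(joint))
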
diff --git a/art/defences/detector/evasion/subsetscanning/scanner.py b/art/defences/detector/evasion/subsetscanning/scanner.py index d66508d80c..94bd37295a 100644 --- a/art/defences/detector/evasion/subsetscanning/scanner.py +++ b/art/defences/detector/evasion/subsetscanning/scanner.py @@ -138,22 +138,30 @@ def fgss_for_nets( prob = np.random.uniform(0, 1) if image_to_node: indices_of_seeds = np.random.choice( - np.arange(pvalues.shape[0]), int(pvalues.shape[0] * prob), replace=False, + np.arange(pvalues.shape[0]), + int(pvalues.shape[0] * prob), + replace=False, ) else: indices_of_seeds = np.random.choice( - np.arange(pvalues.shape[1]), int(pvalues.shape[1] * prob), replace=False, + np.arange(pvalues.shape[1]), + int(pvalues.shape[1] * prob), + replace=False, ) while indices_of_seeds.size == 0: # eventually will make non zero prob = np.random.uniform(0, 1) if image_to_node: indices_of_seeds = np.random.choice( - np.arange(pvalues.shape[0]), int(pvalues.shape[0] * prob), replace=False, + np.arange(pvalues.shape[0]), + int(pvalues.shape[0] * prob), + replace=False, ) else: indices_of_seeds = np.random.choice( - np.arange(pvalues.shape[1]), int(pvalues.shape[1] * prob), replace=False, + np.arange(pvalues.shape[1]), + int(pvalues.shape[1] * prob), + replace=False, ) indices_of_seeds.astype(int) diff --git a/art/defences/detector/evasion/subsetscanning/scanningops.py b/art/defences/detector/evasion/subsetscanning/scanningops.py index 653e132498..9bfc15378e 100644 --- a/art/defences/detector/evasion/subsetscanning/scanningops.py +++ b/art/defences/detector/evasion/subsetscanning/scanningops.py @@ -76,7 +76,9 @@ def optimize_in_single_dimension( arg_sort_max = np.argsort(pvalues[:, elem_indx, 1]) # arg_sort_min = np.argsort(pvalues[:,e,0]) #collect ranges over images(rows) completely_included = np.searchsorted( - pvalues[:, elem_indx, 1][arg_sort_max], alpha_thresholds, side="right", + pvalues[:, elem_indx, 1][arg_sort_max], + alpha_thresholds, + side="right", ) else: # collect ranges over nodes(columns) @@ -84,7 +86,9 @@ def optimize_in_single_dimension( # arg_sort_min = np.argsort(pvalues[elem_indx,:,0]) completely_included = np.searchsorted( - pvalues[elem_indx, :, 1][arg_sort_max], alpha_thresholds, side="right", + pvalues[elem_indx, :, 1][arg_sort_max], + alpha_thresholds, + side="right", ) # should be num elements by num thresh diff --git a/art/defences/detector/evasion/subsetscanning/scoring_functions.py b/art/defences/detector/evasion/subsetscanning/scoring_functions.py index c99ce9d0e4..34ba68b9cf 100644 --- a/art/defences/detector/evasion/subsetscanning/scoring_functions.py +++ b/art/defences/detector/evasion/subsetscanning/scoring_functions.py @@ -44,7 +44,10 @@ def get_score_bj_fast(n_alpha: list, no_records: list, alpha: np.ndarray) -> np. 
score[inds_tie] = no_records[inds_tie] * np.log(np.true_divide(1, alpha[inds_tie])) factor1 = n_alpha[inds_pos_not_tie] * np.log( - np.true_divide(n_alpha[inds_pos_not_tie], no_records[inds_pos_not_tie] * alpha[inds_pos_not_tie],) + np.true_divide( + n_alpha[inds_pos_not_tie], + no_records[inds_pos_not_tie] * alpha[inds_pos_not_tie], + ) ) factor2 = no_records[inds_pos_not_tie] - n_alpha[inds_pos_not_tie] diff --git a/art/defences/detector/poison/activation_defence.py b/art/defences/detector/poison/activation_defence.py index bb7fb3b2b5..4bfa8cc865 100644 --- a/art/defences/detector/poison/activation_defence.py +++ b/art/defences/detector/poison/activation_defence.py @@ -122,7 +122,10 @@ def evaluate_defence(self, is_clean: np.ndarray, **kwargs) -> str: activations = self._get_activations() self.activations_by_class = self._segment_by_class(activations, self.y_train) - (self.clusters_by_class, self.red_activations_by_class,) = self.cluster_activations() + ( + self.clusters_by_class, + self.red_activations_by_class, + ) = self.cluster_activations() _, self.assigned_clean_by_class = self.analyze_clusters() # Now check ground truth: @@ -200,7 +203,10 @@ def detect_poison(self, **kwargs) -> Tuple[Dict[str, Any], List[int]]: if not self.activations_by_class: activations = self._get_activations() self.activations_by_class = self._segment_by_class(activations, self.y_train) - (self.clusters_by_class, self.red_activations_by_class,) = self.cluster_activations() + ( + self.clusters_by_class, + self.red_activations_by_class, + ) = self.cluster_activations() report, self.assigned_clean_by_class = self.analyze_clusters() # Here, assigned_clean_by_class[i][j] is 1 if the jth data point in the ith class was # determined to be clean by activation cluster @@ -296,20 +302,26 @@ def analyze_clusters(self, **kwargs) -> Tuple[Dict[str, Any], np.ndarray]: analyzer = ClusteringAnalyzer() if self.cluster_analysis == "smaller": - (self.assigned_clean_by_class, self.poisonous_clusters, report,) = analyzer.analyze_by_size( - self.clusters_by_class - ) + ( + self.assigned_clean_by_class, + self.poisonous_clusters, + report, + ) = analyzer.analyze_by_size(self.clusters_by_class) elif self.cluster_analysis == "relative-size": - (self.assigned_clean_by_class, self.poisonous_clusters, report,) = analyzer.analyze_by_relative_size( - self.clusters_by_class - ) + ( + self.assigned_clean_by_class, + self.poisonous_clusters, + report, + ) = analyzer.analyze_by_relative_size(self.clusters_by_class) elif self.cluster_analysis == "distance": (self.assigned_clean_by_class, self.poisonous_clusters, report,) = analyzer.analyze_by_distance( - self.clusters_by_class, separated_activations=self.red_activations_by_class, + self.clusters_by_class, + separated_activations=self.red_activations_by_class, ) elif self.cluster_analysis == "silhouette-scores": (self.assigned_clean_by_class, self.poisonous_clusters, report,) = analyzer.analyze_by_silhouette_score( - self.clusters_by_class, reduced_activations_by_class=self.red_activations_by_class, + self.clusters_by_class, + reduced_activations_by_class=self.red_activations_by_class, ) else: raise ValueError("Unsupported cluster analysis technique " + self.cluster_analysis) diff --git a/art/defences/detector/poison/clustering_analyzer.py b/art/defences/detector/poison/clustering_analyzer.py index 6134e8a56c..fa65f45e8b 100644 --- a/art/defences/detector/poison/clustering_analyzer.py +++ b/art/defences/detector/poison/clustering_analyzer.py @@ -104,7 +104,9 @@ def analyze_by_size( return 
np.asarray(all_assigned_clean), summary_poison_clusters, report def analyze_by_distance( - self, separated_clusters: List[np.ndarray], separated_activations: List[np.ndarray], + self, + separated_clusters: List[np.ndarray], + separated_activations: List[np.ndarray], ) -> Tuple[np.ndarray, List[List[int]], Dict[str, int]]: """ Assigns a cluster as poisonous if its median activation is closer to the median activation for another class @@ -191,7 +193,10 @@ def analyze_by_distance( return all_assigned_clean, summary_poison_clusters, report def analyze_by_relative_size( - self, separated_clusters: List[np.ndarray], size_threshold: float = 0.35, r_size: int = 2, + self, + separated_clusters: List[np.ndarray], + size_threshold: float = 0.35, + r_size: int = 2, ) -> Tuple[np.ndarray, List[List[int]], Dict[str, int]]: """ Assigns a cluster as poisonous if the smaller one contains less than threshold of the data. @@ -313,7 +318,9 @@ def analyze_by_silhouette_score( # Generate report for class silhouette_avg = round(silhouette_score(activations, clusters), r_silhouette) dict_i: Dict[str, Any] = dict( - sizes_clusters=str(bins), ptc_cluster=str(percentages), avg_silhouette_score=str(silhouette_avg), + sizes_clusters=str(bins), + ptc_cluster=str(percentages), + avg_silhouette_score=str(silhouette_avg), ) if np.shape(poison_clusters)[1] != 0: diff --git a/art/defences/detector/poison/ground_truth_evaluator.py b/art/defences/detector/poison/ground_truth_evaluator.py index c3d2791489..959036f6e0 100644 --- a/art/defences/detector/poison/ground_truth_evaluator.py +++ b/art/defences/detector/poison/ground_truth_evaluator.py @@ -118,21 +118,53 @@ def get_confusion_matrix(self, values: np.ndarray) -> dict: fp_rate = self.calculate_and_print(false_positive, false_positive + true_negative, "false-positive rate") fn_rate = self.calculate_and_print(false_negative, true_positive + false_negative, "false-negative rate") - dic_tp = dict(rate=round(tp_rate, 2), numerator=true_positive, denominator=(true_positive + false_negative),) + dic_tp = dict( + rate=round(tp_rate, 2), + numerator=true_positive, + denominator=(true_positive + false_negative), + ) if (true_positive + false_negative) == 0: - dic_tp = dict(rate="N/A", numerator=true_positive, denominator=(true_positive + false_negative),) - - dic_tn = dict(rate=round(tn_rate, 2), numerator=true_negative, denominator=(false_positive + true_negative),) + dic_tp = dict( + rate="N/A", + numerator=true_positive, + denominator=(true_positive + false_negative), + ) + + dic_tn = dict( + rate=round(tn_rate, 2), + numerator=true_negative, + denominator=(false_positive + true_negative), + ) if (false_positive + true_negative) == 0: - dic_tn = dict(rate="N/A", numerator=true_negative, denominator=(false_positive + true_negative),) - - dic_fp = dict(rate=round(fp_rate, 2), numerator=false_positive, denominator=(false_positive + true_negative),) + dic_tn = dict( + rate="N/A", + numerator=true_negative, + denominator=(false_positive + true_negative), + ) + + dic_fp = dict( + rate=round(fp_rate, 2), + numerator=false_positive, + denominator=(false_positive + true_negative), + ) if (false_positive + true_negative) == 0: - dic_fp = dict(rate="N/A", numerator=false_positive, denominator=(false_positive + true_negative),) - - dic_fn = dict(rate=round(fn_rate, 2), numerator=false_negative, denominator=(true_positive + false_negative),) + dic_fp = dict( + rate="N/A", + numerator=false_positive, + denominator=(false_positive + true_negative), + ) + + dic_fn = dict( + rate=round(fn_rate, 
2), + numerator=false_negative, + denominator=(true_positive + false_negative), + ) if (true_positive + false_negative) == 0: - dic_fn = dict(rate="N/A", numerator=false_negative, denominator=(true_positive + false_negative),) + dic_fn = dict( + rate="N/A", + numerator=false_negative, + denominator=(true_positive + false_negative), + ) dic_class.update(dict(TruePositive=dic_tp)) dic_class.update(dict(TrueNegative=dic_tn)) diff --git a/art/defences/detector/poison/provenance_defense.py b/art/defences/detector/poison/provenance_defense.py index eb6520ea6d..9264479c7b 100644 --- a/art/defences/detector/poison/provenance_defense.py +++ b/art/defences/detector/poison/provenance_defense.py @@ -174,7 +174,11 @@ def detect_poison_partially_trusted(self, **kwargs) -> Dict[int, float]: filtered_model.fit(filtered_data, filtered_labels) var_w = performance_diff( - filtered_model, unfiltered_model, self.x_val, self.y_val, perf_function=self.perf_func, + filtered_model, + unfiltered_model, + self.x_val, + self.y_val, + perf_function=self.perf_func, ) if self.eps < var_w: suspected[device_idx] = var_w @@ -192,9 +196,14 @@ def detect_poison_untrusted(self, **kwargs) -> Dict[int, float]: self.set_params(**kwargs) suspected = {} - (train_data, valid_data, train_labels, valid_labels, train_prov, valid_prov,) = train_test_split( - self.x_train, self.y_train, self.p_train, test_size=self.pp_valid - ) + ( + train_data, + valid_data, + train_labels, + valid_labels, + train_prov, + valid_prov, + ) = train_test_split(self.x_train, self.y_train, self.p_train, test_size=self.pp_valid) train_segments = segment_by_class(train_data, train_prov, self.num_devices) valid_segments = segment_by_class(valid_data, valid_prov, self.num_devices) diff --git a/art/defences/detector/poison/roni.py b/art/defences/detector/poison/roni.py index 8eef1362b1..7ce5ae77f0 100644 --- a/art/defences/detector/poison/roni.py +++ b/art/defences/detector/poison/roni.py @@ -151,7 +151,11 @@ def detect_poison(self, **kwargs) -> Tuple[dict, List[int]]: after_classifier = deepcopy(before_classifier) after_classifier.fit(x=np.vstack([x_trusted, x_i]), y=np.vstack([y_trusted, y_i])) acc_shift = performance_diff( - before_classifier, after_classifier, self.x_quiz, self.y_quiz, perf_function=self.perf_func, + before_classifier, + after_classifier, + self.x_quiz, + self.y_quiz, + perf_function=self.perf_func, ) # print(acc_shift, median, std_dev) if self.is_suspicious(before_classifier, acc_shift): @@ -193,7 +197,11 @@ def get_calibration_info(self, before_classifier: "CLASSIFIER_TYPE") -> Tuple[np after_classifier.fit(x=np.vstack([self.x_val, x_c]), y=np.vstack([self.y_val, y_c])) accs.append( performance_diff( - before_classifier, after_classifier, self.x_quiz, self.y_quiz, perf_function=self.perf_func, + before_classifier, + after_classifier, + self.x_quiz, + self.y_quiz, + perf_function=self.perf_func, ) ) diff --git a/art/defences/detector/poison/spectral_signature_defense.py b/art/defences/detector/poison/spectral_signature_defense.py index 753f20aa8b..b0a2083aae 100644 --- a/art/defences/detector/poison/spectral_signature_defense.py +++ b/art/defences/detector/poison/spectral_signature_defense.py @@ -134,7 +134,9 @@ def detect_poison(self, **kwargs) -> Tuple[dict, List[int]]: keep_by_class.append([True]) base_indices_by_class = segment_by_class( - np.arange(self.y_train.shape[0]), self.y_train, self.classifier.nb_classes, + np.arange(self.y_train.shape[0]), + self.y_train, + self.classifier.nb_classes, ) is_clean_lst = [0] * 
self.y_train.shape[0] report = {} diff --git a/art/defences/postprocessor/reverse_sigmoid.py b/art/defences/postprocessor/reverse_sigmoid.py index 9a537ce9f5..06d4a43bf5 100644 --- a/art/defences/postprocessor/reverse_sigmoid.py +++ b/art/defences/postprocessor/reverse_sigmoid.py @@ -37,7 +37,11 @@ class ReverseSigmoid(Postprocessor): params = ["beta", "gamma"] def __init__( - self, beta: float = 1.0, gamma: float = 0.1, apply_fit: bool = False, apply_predict: bool = True, + self, + beta: float = 1.0, + gamma: float = 0.1, + apply_fit: bool = False, + apply_predict: bool = True, ) -> None: """ Create a ReverseSigmoid postprocessor. diff --git a/art/defences/preprocessor/feature_squeezing.py b/art/defences/preprocessor/feature_squeezing.py index 0a3cf35d61..4bb09d4909 100644 --- a/art/defences/preprocessor/feature_squeezing.py +++ b/art/defences/preprocessor/feature_squeezing.py @@ -51,7 +51,11 @@ class FeatureSqueezing(Preprocessor): params = ["clip_values", "bit_depth"] def __init__( - self, clip_values: CLIP_VALUES_TYPE, bit_depth: int = 8, apply_fit: bool = False, apply_predict: bool = True, + self, + clip_values: CLIP_VALUES_TYPE, + bit_depth: int = 8, + apply_fit: bool = False, + apply_predict: bool = True, ) -> None: """ Create an instance of feature squeezing. diff --git a/art/defences/preprocessor/label_smoothing.py b/art/defences/preprocessor/label_smoothing.py index 9720381e94..add97946ba 100644 --- a/art/defences/preprocessor/label_smoothing.py +++ b/art/defences/preprocessor/label_smoothing.py @@ -51,7 +51,12 @@ class LabelSmoothing(Preprocessor): params = ["max_value"] - def __init__(self, max_value: float = 0.9, apply_fit: bool = True, apply_predict: bool = False,) -> None: + def __init__( + self, + max_value: float = 0.9, + apply_fit: bool = True, + apply_predict: bool = False, + ) -> None: """ Create an instance of label smoothing. diff --git a/art/defences/preprocessor/pixel_defend.py b/art/defences/preprocessor/pixel_defend.py index 0cd2afd864..cfce7449ff 100644 --- a/art/defences/preprocessor/pixel_defend.py +++ b/art/defences/preprocessor/pixel_defend.py @@ -112,7 +112,10 @@ def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.nd for feat_index in range(x.shape[1]): # Setup the search space f_probs = probs[i, feat_index, :] - f_range = range(int(max(x_i[feat_index] - self.eps, 0)), int(min(x_i[feat_index] + self.eps, 255) + 1),) + f_range = range( + int(max(x_i[feat_index] - self.eps, 0)), + int(min(x_i[feat_index] + self.eps, 255) + 1), + ) # Look in the search space best_prob = -1 diff --git a/art/defences/trainer/adversarial_trainer_fbf.py b/art/defences/trainer/adversarial_trainer_fbf.py index 0a3ad11724..ce0d259593 100644 --- a/art/defences/trainer/adversarial_trainer_fbf.py +++ b/art/defences/trainer/adversarial_trainer_fbf.py @@ -46,7 +46,9 @@ class AdversarialTrainerFBF(Trainer, abc.ABC): """ def __init__( - self, classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", eps: Union[int, float] = 8, + self, + classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", + eps: Union[int, float] = 8, ): """ Create an :class:`.AdversarialTrainerFBF` instance. 
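Both the `_projection` hunks at the top of this patch and the PGD attack configured in the Madry trainer below rely on the same geometric step: scaling a perturbation back onto an eps-ball. A NumPy sketch of the L2 case, mirroring `v <- v * min(1, eps / (||v||_2 + tol))` from the torch hunks:

import numpy as np

def project_l2(values: np.ndarray, eps: float, tol: float = 1e-10) -> np.ndarray:
    # Per-sample L2 norms over the flattened perturbations.
    norms = np.linalg.norm(values.reshape(values.shape[0], -1), ord=2, axis=1)
    # Shrink only the samples that lie outside the ball of radius eps.
    factor = np.minimum(1.0, eps / (norms + tol))
    return values * factor.reshape((-1,) + (1,) * (values.ndim - 1))
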
diff --git a/art/defences/trainer/adversarial_trainer_madry_pgd.py b/art/defences/trainer/adversarial_trainer_madry_pgd.py index 509b9825ae..6daf58e4d1 100644 --- a/art/defences/trainer/adversarial_trainer_madry_pgd.py +++ b/art/defences/trainer/adversarial_trainer_madry_pgd.py @@ -81,7 +81,11 @@ def __init__( # Setting up adversary and perform adversarial training: self.attack = ProjectedGradientDescent( - classifier, eps=eps, eps_step=eps_step, max_iter=max_iter, num_random_init=num_random_init, + classifier, + eps=eps, + eps_step=eps_step, + max_iter=max_iter, + num_random_init=num_random_init, ) self.trainer = AdversarialTrainer(classifier, self.attack, ratio=1.0) # type: ignore diff --git a/art/defences/transformer/poisoning/strip.py b/art/defences/transformer/poisoning/strip.py index 438cc6ef74..6bc7b04ed7 100644 --- a/art/defences/transformer/poisoning/strip.py +++ b/art/defences/transformer/poisoning/strip.py @@ -60,7 +60,9 @@ def __init__(self, classifier: "CLASSIFIER_TYPE"): self._check_params() def __call__( # type: ignore - self, num_samples: int = 20, false_acceptance_rate: float = 0.01, + self, + num_samples: int = 20, + false_acceptance_rate: float = 0.01, ) -> "ClassifierWithStrip": """ Create a STRIP defense diff --git a/art/estimators/certification/randomized_smoothing/randomized_smoothing.py b/art/estimators/certification/randomized_smoothing/randomized_smoothing.py index a407285398..4c06ed9b3a 100644 --- a/art/estimators/certification/randomized_smoothing/randomized_smoothing.py +++ b/art/estimators/certification/randomized_smoothing/randomized_smoothing.py @@ -44,7 +44,14 @@ class RandomizedSmoothingMixin(ABC): | Paper link: https://arxiv.org/abs/1902.02918 """ - def __init__(self, sample_size: int, *args, scale: float = 0.1, alpha: float = 0.001, **kwargs,) -> None: + def __init__( + self, + sample_size: int, + *args, + scale: float = 0.1, + alpha: float = 0.001, + **kwargs, + ) -> None: """ Create a randomized smoothing wrapper. diff --git a/art/estimators/classification/keras.py b/art/estimators/classification/keras.py index 6fa7f6c233..69b6fc2f29 100644 --- a/art/estimators/classification/keras.py +++ b/art/estimators/classification/keras.py @@ -129,7 +129,11 @@ def __init__( self._initialize_params(model, use_logits, input_layer, output_layer) def _initialize_params( - self, model: KERAS_MODEL_TYPE, use_logits: bool, input_layer: int, output_layer: int, + self, + model: KERAS_MODEL_TYPE, + use_logits: bool, + input_layer: int, + output_layer: int, ): """ Initialize most parameters of the classifier. 
This is a convenience function called by `__init__` and @@ -181,7 +185,9 @@ def _initialize_params( self._input_shape = k.int_shape(self._input)[1:] logger.debug( - "Inferred %i classes and %s as input shape for Keras classifier.", self.nb_classes, str(self.input_shape), + "Inferred %i classes and %s as input shape for Keras classifier.", + self.nb_classes, + str(self.input_shape), ) self._use_logits = use_logits @@ -243,7 +249,12 @@ def _initialize_params( if ( "__name__" in dir(loss_function) and loss_function.__name__ - in ["categorical_hinge", "categorical_crossentropy", "binary_crossentropy", "kullback_leibler_divergence",] + in [ + "categorical_hinge", + "categorical_crossentropy", + "binary_crossentropy", + "kullback_leibler_divergence", + ] ) or flag_is_instance: self._reduce_labels = False label_ph = k.placeholder(shape=self._output.shape) @@ -251,7 +262,11 @@ def _initialize_params( "__name__" in dir(loss_function) and loss_function.__name__ in ["sparse_categorical_crossentropy"] ) or isinstance(loss_function, keras.losses.SparseCategoricalCrossentropy): self._reduce_labels = True - label_ph = k.placeholder(shape=[None,]) + label_ph = k.placeholder( + shape=[ + None, + ] + ) else: # pragma: no cover raise ValueError("Loss function not recognised.") @@ -578,7 +593,11 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg self.preprocessing is None or ( isinstance(self.preprocessing, StandardisationMeanStd) - and (self.preprocessing.mean, self.preprocessing.std,) == (0, 1) + and ( + self.preprocessing.mean, + self.preprocessing.std, + ) + == (0, 1) ) ): try: diff --git a/art/estimators/classification/mxnet.py b/art/estimators/classification/mxnet.py index 7840d4dbae..f7ec4d15cc 100644 --- a/art/estimators/classification/mxnet.py +++ b/art/estimators/classification/mxnet.py @@ -52,7 +52,14 @@ class MXClassifier(ClassGradientsMixin, ClassifierMixin, MXEstimator): # lgtm [ estimator_params = ( MXEstimator.estimator_params + ClassifierMixin.estimator_params - + ["loss", "input_shape", "nb_classes", "optimizer", "ctx", "channels_first",] + + [ + "loss", + "input_shape", + "nb_classes", + "optimizer", + "ctx", + "channels_first", + ] ) def __init__( diff --git a/art/estimators/classification/pytorch.py b/art/estimators/classification/pytorch.py index 942a296af1..43117d98f7 100644 --- a/art/estimators/classification/pytorch.py +++ b/art/estimators/classification/pytorch.py @@ -59,7 +59,14 @@ class PyTorchClassifier(ClassGradientsMixin, ClassifierMixin, PyTorchEstimator): estimator_params = ( PyTorchEstimator.estimator_params + ClassifierMixin.estimator_params - + ["loss", "input_shape", "optimizer", "use_amp", "opt_level", "loss_scale",] + + [ + "loss", + "input_shape", + "optimizer", + "use_amp", + "opt_level", + "loss_scale", + ] ) def __init__( @@ -142,10 +149,16 @@ def __init__( # Index of layer at which the class gradients should be calculated self._layer_idx_gradients = -1 - if isinstance(self._loss, (torch.nn.CrossEntropyLoss, torch.nn.NLLLoss, torch.nn.MultiMarginLoss),): + if isinstance( + self._loss, + (torch.nn.CrossEntropyLoss, torch.nn.NLLLoss, torch.nn.MultiMarginLoss), + ): self._reduce_labels = True self._int_labels = True - elif isinstance(self._loss, (torch.nn.BCELoss),): + elif isinstance( + self._loss, + (torch.nn.BCELoss), + ): self._reduce_labels = True self._int_labels = False else: @@ -425,7 +438,11 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg self.preprocessing is None or ( 
isinstance(self.preprocessing, StandardisationMeanStdPyTorch) - and (self.preprocessing.mean, self.preprocessing.std,) == (0, 1) + and ( + self.preprocessing.mean, + self.preprocessing.std, + ) + == (0, 1) ) ): for _ in range(nb_epochs): @@ -590,18 +607,24 @@ def hook(grad): for i in range(num_outputs): torch.autograd.backward( - preds[:, i], torch.tensor([1.0] * len(preds[:, 0])).to(self._device), retain_graph=True, + preds[:, i], + torch.tensor([1.0] * len(preds[:, 0])).to(self._device), + retain_graph=True, ) elif isinstance(label, (int, np.integer)): torch.autograd.backward( - preds[:, label], torch.tensor([1.0] * len(preds[:, 0])).to(self._device), retain_graph=True, + preds[:, label], + torch.tensor([1.0] * len(preds[:, 0])).to(self._device), + retain_graph=True, ) else: unique_label = list(np.unique(label)) for i in unique_label: torch.autograd.backward( - preds[:, i], torch.tensor([1.0] * len(preds[:, 0])).to(self._device), retain_graph=True, + preds[:, i], + torch.tensor([1.0] * len(preds[:, 0])).to(self._device), + retain_graph=True, ) grads = np.swapaxes(np.array(grads), 0, 1) @@ -671,7 +694,10 @@ def compute_loss( # pylint: disable=W0221 return loss.detach().cpu().numpy() def compute_losses( - self, x: Union[np.ndarray, "torch.Tensor"], y: Union[np.ndarray, "torch.Tensor"], reduction: str = "none", + self, + x: Union[np.ndarray, "torch.Tensor"], + y: Union[np.ndarray, "torch.Tensor"], + reduction: str = "none", ) -> Dict[str, Union[np.ndarray, "torch.Tensor"]]: """ Compute all loss components. @@ -1020,7 +1046,8 @@ def get_layers(self) -> List[str]: else: # pragma: no cover raise TypeError("The input model must inherit from `nn.Module`.") logger.info( - "Inferred %i hidden layers on PyTorch classifier.", len(result), + "Inferred %i hidden layers on PyTorch classifier.", + len(result), ) return result diff --git a/art/estimators/classification/query_efficient_bb.py b/art/estimators/classification/query_efficient_bb.py index 128791f679..87c5e9a7f4 100644 --- a/art/estimators/classification/query_efficient_bb.py +++ b/art/estimators/classification/query_efficient_bb.py @@ -107,9 +107,15 @@ def _generate_samples(self, x: np.ndarray, epsilon_map: np.ndarray) -> Tuple[np. :return: Two arrays of new input samples to approximate gradient. 
""" minus = clip_and_round( - np.repeat(x, self.num_basis, axis=0) - epsilon_map, self.clip_values, self.round_samples, + np.repeat(x, self.num_basis, axis=0) - epsilon_map, + self.clip_values, + self.round_samples, + ) + plus = clip_and_round( + np.repeat(x, self.num_basis, axis=0) + epsilon_map, + self.clip_values, + self.round_samples, ) - plus = clip_and_round(np.repeat(x, self.num_basis, axis=0) + epsilon_map, self.clip_values, self.round_samples,) return minus, plus def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = None, **kwargs) -> np.ndarray: diff --git a/art/estimators/classification/scikitlearn.py b/art/estimators/classification/scikitlearn.py index a5e636a0d8..4817bd084c 100644 --- a/art/estimators/classification/scikitlearn.py +++ b/art/estimators/classification/scikitlearn.py @@ -90,7 +90,12 @@ def SklearnClassifier( # This basic class at least generically handles `fit`, `predict` and `save` return ScikitlearnClassifier( - model, clip_values, preprocessing_defences, postprocessing_defences, preprocessing, use_logits, + model, + clip_values, + preprocessing_defences, + postprocessing_defences, + preprocessing, + use_logits, ) @@ -194,7 +199,10 @@ def predict(self, x: np.ndarray, **kwargs) -> np.ndarray: elif callable(getattr(self.model, "predict_proba", None)): y_pred = self.model.predict_proba(x_preprocessed) elif callable(getattr(self.model, "predict", None)): - y_pred = to_categorical(self.model.predict(x_preprocessed), nb_classes=self.model.classes_.shape[0],) + y_pred = to_categorical( + self.model.predict(x_preprocessed), + nb_classes=self.model.classes_.shape[0], + ) else: # pragma: no cover raise ValueError("The provided model does not have methods `predict_proba` or `predict`.") @@ -897,7 +905,9 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: class_weight = np.ones(self.nb_classes) else: class_weight = compute_class_weight( - class_weight=self.model.class_weight, classes=self.model.classes_, y=y_index, + class_weight=self.model.class_weight, + classes=self.model.classes_, + y=y_index, ) y_pred = self.predict(x=x_preprocessed) @@ -1048,7 +1058,13 @@ def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = Non sign_multiplier = 1 if label is None: - gradients = np.zeros((x_preprocessed.shape[0], self.nb_classes, x_preprocessed.shape[1],)) + gradients = np.zeros( + ( + x_preprocessed.shape[0], + self.nb_classes, + x_preprocessed.shape[1], + ) + ) for i_label in range(self.nb_classes): # type: ignore for i_sample in range(num_samples): @@ -1059,16 +1075,24 @@ def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = Non else: label_multiplier = 1 - for label_sv in range(support_indices[i_label], support_indices[i_label + 1],): + for label_sv in range( + support_indices[i_label], + support_indices[i_label + 1], + ): alpha_i_k_y_i = self.model.dual_coef_[ - not_label if not_label < i_label else not_label - 1, label_sv, + not_label if not_label < i_label else not_label - 1, + label_sv, ] grad_kernel = self._get_kernel_gradient_sv(label_sv, x_preprocessed[i_sample]) gradients[i_sample, i_label] += label_multiplier * alpha_i_k_y_i * grad_kernel - for not_label_sv in range(support_indices[not_label], support_indices[not_label + 1],): + for not_label_sv in range( + support_indices[not_label], + support_indices[not_label + 1], + ): alpha_i_k_y_i = self.model.dual_coef_[ - i_label if i_label < not_label else i_label - 1, not_label_sv, + i_label if i_label < not_label else i_label - 
1, + not_label_sv, ] grad_kernel = self._get_kernel_gradient_sv(not_label_sv, x_preprocessed[i_sample]) gradients[i_sample, i_label] += label_multiplier * alpha_i_k_y_i * grad_kernel @@ -1086,14 +1110,19 @@ def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = Non for label_sv in range(support_indices[label], support_indices[label + 1]): alpha_i_k_y_i = self.model.dual_coef_[ - not_label if not_label < label else not_label - 1, label_sv, + not_label if not_label < label else not_label - 1, + label_sv, ] grad_kernel = self._get_kernel_gradient_sv(label_sv, x_preprocessed[i_sample]) gradients[i_sample, 0] += label_multiplier * alpha_i_k_y_i * grad_kernel - for not_label_sv in range(support_indices[not_label], support_indices[not_label + 1],): + for not_label_sv in range( + support_indices[not_label], + support_indices[not_label + 1], + ): alpha_i_k_y_i = self.model.dual_coef_[ - label if label < not_label else label - 1, not_label_sv, + label if label < not_label else label - 1, + not_label_sv, ] grad_kernel = self._get_kernel_gradient_sv(not_label_sv, x_preprocessed[i_sample]) gradients[i_sample, 0] += label_multiplier * alpha_i_k_y_i * grad_kernel @@ -1114,15 +1143,20 @@ def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = Non label_multiplier = 1 for label_sv in range( - support_indices[label[i_sample]], support_indices[label[i_sample] + 1], + support_indices[label[i_sample]], + support_indices[label[i_sample] + 1], ): alpha_i_k_y_i = self.model.dual_coef_[ - not_label if not_label < label[i_sample] else not_label - 1, label_sv, + not_label if not_label < label[i_sample] else not_label - 1, + label_sv, ] grad_kernel = self._get_kernel_gradient_sv(label_sv, x_preprocessed[i_sample]) gradients[i_sample, 0] += label_multiplier * alpha_i_k_y_i * grad_kernel - for not_label_sv in range(support_indices[not_label], support_indices[not_label + 1],): + for not_label_sv in range( + support_indices[not_label], + support_indices[not_label + 1], + ): alpha_i_k_y_i = self.model.dual_coef_[ label[i_sample] if label[i_sample] < not_label else label[i_sample] - 1, not_label_sv, @@ -1137,7 +1171,13 @@ def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = Non elif isinstance(self.model, sklearn.svm.LinearSVC): if label is None: - gradients = np.zeros((x_preprocessed.shape[0], self.nb_classes, x_preprocessed.shape[1],)) + gradients = np.zeros( + ( + x_preprocessed.shape[0], + self.nb_classes, + x_preprocessed.shape[1], + ) + ) for i in range(self.nb_classes): # type: ignore for i_sample in range(num_samples): @@ -1270,7 +1310,10 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: grad_kernel = self._get_kernel_gradient_sv(i_label_sv, x_preprocessed[i_sample]) gradients[i_sample, :] += sign_multiplier * alpha_i_k_y_i * grad_kernel - for i_not_label_sv in range(support_indices[i_not_label], support_indices[i_not_label + 1],): + for i_not_label_sv in range( + support_indices[i_not_label], + support_indices[i_not_label + 1], + ): alpha_i_k_y_i = self.model.dual_coef_[i_not_label_i, i_not_label_sv] * label_multiplier grad_kernel = self._get_kernel_gradient_sv(i_not_label_sv, x_preprocessed[i_sample]) gradients[i_sample, :] += sign_multiplier * alpha_i_k_y_i * grad_kernel diff --git a/art/estimators/classification/tensorflow.py b/art/estimators/classification/tensorflow.py index bc25251091..070a442707 100644 --- a/art/estimators/classification/tensorflow.py +++ b/art/estimators/classification/tensorflow.py @@ -56,7 
+56,16 @@ class TensorFlowClassifier(ClassGradientsMixin, ClassifierMixin, TensorFlowEstim estimator_params = ( TensorFlowEstimator.estimator_params + ClassifierMixin.estimator_params - + ["input_ph", "output", "labels_ph", "train", "loss", "learning", "sess", "feed_dict",] + + [ + "input_ph", + "output", + "labels_ph", + "train", + "loss", + "learning", + "sess", + "feed_dict", + ] ) def __init__( @@ -326,7 +335,11 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg self.preprocessing is None or ( isinstance(self.preprocessing, StandardisationMeanStd) - and (self.preprocessing.mean, self.preprocessing.std,) == (0, 1) + and ( + self.preprocessing.mean, + self.preprocessing.std, + ) + == (0, 1) ) ): for _ in range(nb_epochs): @@ -640,10 +653,13 @@ def save(self, filename: str, path: Optional[str] = None) -> None: builder = saved_model.builder.SavedModelBuilder(full_path) signature = predict_signature_def( - inputs={"SavedInputPhD": self.input_ph}, outputs={"SavedOutput": self.output}, + inputs={"SavedInputPhD": self.input_ph}, + outputs={"SavedOutput": self.output}, ) builder.add_meta_graph_and_variables( - sess=self._sess, tags=[tag_constants.SERVING], signature_def_map={"predict": signature}, + sess=self._sess, + tags=[tag_constants.SERVING], + signature_def_map={"predict": signature}, ) builder.save() @@ -786,7 +802,11 @@ class TensorFlowV2Classifier(ClassGradientsMixin, ClassifierMixin, TensorFlowV2E estimator_params = ( TensorFlowV2Estimator.estimator_params + ClassifierMixin.estimator_params - + ["input_shape", "loss_object", "train_step",] + + [ + "input_shape", + "loss_object", + "train_step", + ] ) def __init__( @@ -978,7 +998,11 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg self.preprocessing is None or ( isinstance(self.preprocessing, StandardisationMeanStdTensorFlow) - and (self.preprocessing.mean, self.preprocessing.std,) == (0, 1) + and ( + self.preprocessing.mean, + self.preprocessing.std, + ) + == (0, 1) ) ): for _ in range(nb_epochs): @@ -1127,7 +1151,10 @@ def compute_loss( # pylint: disable=W0221 return loss.numpy() def compute_losses( - self, x: Union[np.ndarray, "tf.Tensor"], y: Union[np.ndarray, "tf.Tensor"], reduction: str = "none", + self, + x: Union[np.ndarray, "tf.Tensor"], + y: Union[np.ndarray, "tf.Tensor"], + reduction: str = "none", ) -> Dict[str, Union[np.ndarray, "tf.Tensor"]]: """ Compute all loss components. 
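The `fit_generator` hunks in the Keras, PyTorch, and TensorFlow classifiers all guard the same fast path: hand the data generator straight to the framework only when preprocessing is absent or a no-op standardisation. The condition they reformat reduces to this check, shown for the NumPy variant; the PyTorch and TensorFlow estimators use their framework-specific `StandardisationMeanStd*` classes instead:

from art.preprocessing.standardisation_mean_std.numpy import StandardisationMeanStd

def is_identity_preprocessing(preprocessing) -> bool:
    # True when preprocessing would leave the data unchanged, so the
    # native framework training loop can consume the generator directly.
    return preprocessing is None or (
        isinstance(preprocessing, StandardisationMeanStd) and (preprocessing.mean, preprocessing.std) == (0, 1)
    )
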
diff --git a/art/estimators/classification/xgboost.py b/art/estimators/classification/xgboost.py index f0e9f546eb..7db44e805f 100644 --- a/art/estimators/classification/xgboost.py +++ b/art/estimators/classification/xgboost.py @@ -214,7 +214,10 @@ def get_trees(self) -> List["Tree"]: tree_json = json.loads(tree_dump) trees.append( - Tree(class_id=class_label, leaf_nodes=self._get_leaf_nodes(tree_json, i_tree, class_label, box),) + Tree( + class_id=class_label, + leaf_nodes=self._get_leaf_nodes(tree_json, i_tree, class_label, box), + ) ) return trees @@ -253,7 +256,13 @@ def _get_leaf_nodes(self, node, i_tree, class_label, box) -> List["LeafNode"]: if "leaf" in node: leaf_nodes.append( - LeafNode(tree_id=i_tree, class_label=class_label, node_id=node["nodeid"], box=box, value=node["leaf"],) + LeafNode( + tree_id=i_tree, + class_label=class_label, + node_id=node["nodeid"], + box=box, + value=node["leaf"], + ) ) return leaf_nodes diff --git a/art/estimators/object_detection/python_object_detector.py b/art/estimators/object_detection/python_object_detector.py index c5af012feb..cda10e4159 100644 --- a/art/estimators/object_detection/python_object_detector.py +++ b/art/estimators/object_detection/python_object_detector.py @@ -52,7 +52,12 @@ def __init__( preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, preprocessing: "PREPROCESSING_TYPE" = None, - attack_losses: Tuple[str, ...] = ("loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg",), + attack_losses: Tuple[str, ...] = ( + "loss_classifier", + "loss_box_reg", + "loss_objectness", + "loss_rpn_box_reg", + ), device_type: str = "gpu", ): """ diff --git a/art/estimators/object_detection/pytorch_faster_rcnn.py b/art/estimators/object_detection/pytorch_faster_rcnn.py index 0c3f3ef367..f9d5f9d089 100644 --- a/art/estimators/object_detection/pytorch_faster_rcnn.py +++ b/art/estimators/object_detection/pytorch_faster_rcnn.py @@ -48,7 +48,12 @@ def __init__( preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, preprocessing: "PREPROCESSING_TYPE" = None, - attack_losses: Tuple[str, ...] = ("loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg",), + attack_losses: Tuple[str, ...] 
= ( + "loss_classifier", + "loss_box_reg", + "loss_objectness", + "loss_rpn_box_reg", + ), device_type: str = "gpu", ): """ diff --git a/art/estimators/pytorch.py b/art/estimators/pytorch.py index b48c7dfdc6..f8c562b7aa 100644 --- a/art/estimators/pytorch.py +++ b/art/estimators/pytorch.py @@ -37,7 +37,11 @@ class PyTorchEstimator(NeuralNetworkMixin, LossGradientsMixin, BaseEstimator): """ estimator_params = ( - BaseEstimator.estimator_params + NeuralNetworkMixin.estimator_params + ["device_type",] + BaseEstimator.estimator_params + + NeuralNetworkMixin.estimator_params + + [ + "device_type", + ] ) def __init__(self, device_type: str = "gpu", **kwargs) -> None: diff --git a/art/estimators/speech_recognition/pytorch_deep_speech.py b/art/estimators/speech_recognition/pytorch_deep_speech.py index c3f23f743e..24fcf7ddea 100644 --- a/art/estimators/speech_recognition/pytorch_deep_speech.py +++ b/art/estimators/speech_recognition/pytorch_deep_speech.py @@ -321,7 +321,11 @@ def __init__( enabled = True self._model, self._optimizer = amp.initialize( - models=self._model, optimizers=self._optimizer, enabled=enabled, opt_level=opt_level, loss_scale=1.0, + models=self._model, + optimizers=self._optimizer, + enabled=enabled, + opt_level=opt_level, + loss_scale=1.0, ) def predict( @@ -612,7 +616,9 @@ def compute_loss_and_decoded_output( # Transform data into the model input space inputs, targets, input_rates, target_sizes, batch_idx = self._preprocess_transform_model_input( - x=masked_adv_input.to(self.device), y=original_output, real_lengths=real_lengths, + x=masked_adv_input.to(self.device), + y=original_output, + real_lengths=real_lengths, ) # Compute real input sizes @@ -646,7 +652,10 @@ def compute_loss_and_decoded_output( return loss, decoded_output def _preprocess_transform_model_input( - self, x: "torch.Tensor", y: np.ndarray, real_lengths: np.ndarray, + self, + x: "torch.Tensor", + y: np.ndarray, + real_lengths: np.ndarray, ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", List]: """ Apply preprocessing and then transform the user input space into the model input space. This function is used @@ -677,7 +686,11 @@ def _preprocess_transform_model_input( # Transform the input space inputs, targets, input_rates, target_sizes, batch_idx = self._transform_model_input( - x=x, y=y, compute_gradient=False, tensor_input=True, real_lengths=real_lengths, + x=x, + y=y, + compute_gradient=False, + tensor_input=True, + real_lengths=real_lengths, ) return inputs, targets, input_rates, target_sizes, batch_idx diff --git a/art/estimators/speech_recognition/pytorch_espresso.py b/art/estimators/speech_recognition/pytorch_espresso.py index 92574c01a2..f19b6f7ded 100644 --- a/art/estimators/speech_recognition/pytorch_espresso.py +++ b/art/estimators/speech_recognition/pytorch_espresso.py @@ -338,7 +338,10 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in raise NotImplementedError def _transform_model_input( - self, x: Union[np.ndarray, "torch.Tensor"], y: Optional[np.ndarray] = None, compute_gradient: bool = False, + self, + x: Union[np.ndarray, "torch.Tensor"], + y: Optional[np.ndarray] = None, + compute_gradient: bool = False, ) -> Tuple[Dict, List]: """ Transform the user input space into the model input space. 
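The `_collate_fn` hunk that follows pads variable-length token sequences with fairseq's `data_utils.collate_tokens`. As a framework-neutral illustration (not the fairseq API itself), plain `torch.nn.utils.rnn.pad_sequence` performs the same right-padding, with `pad_idx` standing in for `self.dictionary.pad()`:

import torch
from torch.nn.utils.rnn import pad_sequence

pad_idx = 0  # stand-in for self.dictionary.pad()
batch = [torch.tensor([5, 9, 2]), torch.tensor([7, 4])]
# Right-pad every sequence in the batch to the length of the longest one.
target = pad_sequence(batch, batch_first=True, padding_value=pad_idx)
# target:
# tensor([[5, 9, 2],
#         [7, 4, 0]])
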
@@ -380,10 +383,22 @@ def _collate_fn(batch: List) -> dict: pad_idx = self.dictionary.pad() eos_idx = self.dictionary.eos() target = data_utils.collate_tokens( - [s[1] for s in batch], pad_idx, eos_idx, False, False, pad_to_length=None, pad_to_multiple=1, + [s[1] for s in batch], + pad_idx, + eos_idx, + False, + False, + pad_to_length=None, + pad_to_multiple=1, ) prev_output_tokens = data_utils.collate_tokens( - [s[1] for s in batch], pad_idx, eos_idx, False, True, pad_to_length=None, pad_to_multiple=1, + [s[1] for s in batch], + pad_idx, + eos_idx, + False, + True, + pad_to_length=None, + pad_to_multiple=1, ) target = target.long().to(self._device) prev_output_tokens = prev_output_tokens.long().to(self._device) @@ -441,7 +456,11 @@ def _collate_fn(batch: List) -> dict: # return inputs, targets, input_percentages, target_sizes, batch_idx return batch_dict, batch_idx - def _preprocess_transform_model_input(self, x: "torch.Tensor", y: np.ndarray,) -> Tuple[Dict, List]: + def _preprocess_transform_model_input( + self, + x: "torch.Tensor", + y: np.ndarray, + ) -> Tuple[Dict, List]: """ Apply preprocessing and then transform the user input space into the model input space. This function is used by the ASR attack to attack into the PyTorchDeepSpeech estimator whose defences are called with the @@ -470,7 +489,11 @@ def _preprocess_transform_model_input(self, x: "torch.Tensor", y: np.ndarray,) - x = torch.stack(x_batch) # Transform the input space - batch_dict, batch_idx = self._transform_model_input(x=x, y=y, compute_gradient=False,) + batch_dict, batch_idx = self._transform_model_input( + x=x, + y=y, + compute_gradient=False, + ) return batch_dict, batch_idx @@ -488,7 +511,8 @@ def compute_loss_and_decoded_output( """ # Transform data into the model input space batch_dict, batch_idx = self._preprocess_transform_model_input( - x=masked_adv_input.to(self.device), y=original_output, + x=masked_adv_input.to(self.device), + y=original_output, ) # Compute the loss diff --git a/art/evaluations/security_curve/security_curve.py b/art/evaluations/security_curve/security_curve.py index ebc529ef8e..1d595450fb 100644 --- a/art/evaluations/security_curve/security_curve.py +++ b/art/evaluations/security_curve/security_curve.py @@ -187,5 +187,8 @@ def _get_accuracy(y: np.ndarray, y_pred: np.ndarray) -> float: return np.mean(np.argmax(y, axis=1) == np.argmax(y_pred, axis=1)).item() def __repr__(self): - repr_ = "{}(eps={})".format(self.__module__ + "." + self.__class__.__name__, self.eps,) + repr_ = "{}(eps={})".format( + self.__module__ + "." 
+ self.__class__.__name__, + self.eps, + ) return repr_ diff --git a/art/metrics/gradient_check.py b/art/metrics/gradient_check.py index 249e69234e..3fef3d416a 100644 --- a/art/metrics/gradient_check.py +++ b/art/metrics/gradient_check.py @@ -53,7 +53,11 @@ def loss_gradient_check( for i in trange(len(x), desc="Gradient check", disable=not verbose): grad = estimator.loss_gradient(x=x[[i]], y=y[[i]], training_mode=training_mode, **kwargs) is_bad.append( - [(np.min(grad) == 0 and np.max(grad) == 0), np.any(np.isnan(grad)), np.any(np.isinf(grad)),] + [ + (np.min(grad) == 0 and np.max(grad) == 0), + np.any(np.isnan(grad)), + np.any(np.isinf(grad)), + ] ) return np.array(is_bad, dtype=bool) diff --git a/art/metrics/metrics.py b/art/metrics/metrics.py index f194a9df1b..b7351bd8f6 100644 --- a/art/metrics/metrics.py +++ b/art/metrics/metrics.py @@ -47,7 +47,15 @@ "class": FastGradientMethod, "params": {"eps_step": 0.1, "eps_max": 1.0, "clip_min": 0.0, "clip_max": 1.0}, }, - "hsj": {"class": HopSkipJump, "params": {"max_iter": 50, "max_eval": 10000, "init_eval": 100, "init_size": 100,},}, + "hsj": { + "class": HopSkipJump, + "params": { + "max_iter": 50, + "max_eval": 10000, + "init_eval": 100, + "init_size": 100, + }, + }, } @@ -72,7 +80,10 @@ def get_crafter(classifier: "CLASSIFIER_TYPE", attack: str, params: Optional[Dic def empirical_robustness( - classifier: "CLASSIFIER_TYPE", x: np.ndarray, attack_name: str, attack_params: Optional[Dict[str, Any]] = None, + classifier: "CLASSIFIER_TYPE", + x: np.ndarray, + attack_name: str, + attack_params: Optional[Dict[str, Any]] = None, ) -> Union[float, np.ndarray]: """ Compute the Empirical Robustness of a classifier object over the sample `x` for a given adversarial crafting @@ -309,7 +320,8 @@ def clever_t( # Generate a pool of samples rand_pool = np.reshape( - random_sphere(nb_points=pool_factor * batch_size, nb_dims=dim, radius=radius, norm=norm), shape, + random_sphere(nb_points=pool_factor * batch_size, nb_dims=dim, radius=radius, norm=norm), + shape, ) rand_pool += np.repeat(np.array([x]), pool_factor * batch_size, 0) rand_pool = rand_pool.astype(ART_NUMPY_DTYPE) diff --git a/art/metrics/verification_decisions_trees.py b/art/metrics/verification_decisions_trees.py index 6d4d74557a..1ae0d698f5 100644 --- a/art/metrics/verification_decisions_trees.py +++ b/art/metrics/verification_decisions_trees.py @@ -116,7 +116,12 @@ class LeafNode: """ def __init__( - self, tree_id: Optional[int], class_label: int, node_id: Optional[int], box: Box, value: float, + self, + tree_id: Optional[int], + class_label: int, + node_id: Optional[int], + box: Box, + value: float, ) -> None: """ Create a leaf node representation. @@ -263,7 +268,8 @@ def verify( average_bound += clique_bound else: logger.info( - "point %s: WARNING! no robust eps found, verification bound is set as 0 !", i_sample, + "point %s: WARNING! no robust eps found, verification bound is set as 0 !", + i_sample, ) verified_error = 1.0 - num_initial_successes / num_samples @@ -275,7 +281,10 @@ def verify( return average_bound, verified_error def _get_k_partite_clique( - self, accessible_leaves: List[List[LeafNode]], label: int, target_label: Optional[int], + self, + accessible_leaves: List[List[LeafNode]], + label: int, + target_label: Optional[int], ) -> Tuple[float, List]: """ Find the K partite cliques among the accessible leaf nodes. 
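The `_get_distance` hunk below wraps a point-to-interval distance over one feature of a box. The underlying rule is short enough to state directly; this sketch restates the `max(...)` expression from the hunk:

def interval_distance(feature_value: float, lower: float, upper: float) -> float:
    # Zero inside the open interval; otherwise the gap to the nearer
    # endpoint, exactly one of which is positive outside the interval.
    if lower < feature_value < upper:
        return 0.0
    return max(feature_value - upper, lower - feature_value)
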
@@ -306,7 +315,10 @@ def _get_k_partite_clique(
                 cliques_old.append({"box": accessible_leaf.box, "value": new_leaf_value})

         # Loop over all all trees
-        for i_tree in range(start_tree + 1, min(len(accessible_leaves), start_tree + self.max_clique),):
+        for i_tree in range(
+            start_tree + 1,
+            min(len(accessible_leaves), start_tree + self.max_clique),
+        ):
             cliques_new.clear()
             # Loop over all existing cliques
             for clique in cliques_old:
@@ -399,7 +411,10 @@ def _get_distance(self, box: Box, i_sample: int, norm: int) -> float:
             if interval.lower_bound < feature_value < interval.upper_bound:
                 distance = 0.0
             else:
-                difference = max(feature_value - interval.upper_bound, interval.lower_bound - feature_value,)
+                difference = max(
+                    feature_value - interval.upper_bound,
+                    interval.lower_bound - feature_value,
+                )
                 if norm == 0:
                     distance = 1.0
                 elif norm == np.inf:
diff --git a/art/preprocessing/expectation_over_transformation/pytorch.py b/art/preprocessing/expectation_over_transformation/pytorch.py
index d2679244b7..3645b6b1a6 100644
--- a/art/preprocessing/expectation_over_transformation/pytorch.py
+++ b/art/preprocessing/expectation_over_transformation/pytorch.py
@@ -36,7 +36,11 @@ class EoTPyTorch(PreprocessorPyTorch):
     """

     def __init__(
-        self, nb_samples: int, clip_values: Tuple[float, float], apply_fit: bool = False, apply_predict: bool = True,
+        self,
+        nb_samples: int,
+        clip_values: Tuple[float, float],
+        apply_fit: bool = False,
+        apply_predict: bool = True,
     ) -> None:
         """
         Create an instance of EoTPyTorch.
diff --git a/art/preprocessing/expectation_over_transformation/tensorflow.py b/art/preprocessing/expectation_over_transformation/tensorflow.py
index d2e4dde988..5f98e1d9c2 100644
--- a/art/preprocessing/expectation_over_transformation/tensorflow.py
+++ b/art/preprocessing/expectation_over_transformation/tensorflow.py
@@ -36,7 +36,11 @@ class EoTTensorFlowV2(PreprocessorTensorFlowV2):
     """

     def __init__(
-        self, nb_samples: int, clip_values: Tuple[float, float], apply_fit: bool = False, apply_predict: bool = True,
+        self,
+        nb_samples: int,
+        clip_values: Tuple[float, float],
+        apply_fit: bool = False,
+        apply_predict: bool = True,
     ) -> None:
         """
         Create an instance of EoTTensorFlowV2.
diff --git a/art/preprocessing/standardisation_mean_std/numpy.py b/art/preprocessing/standardisation_mean_std/numpy.py
index ed214ba45f..a9856dd9cd 100644
--- a/art/preprocessing/standardisation_mean_std/numpy.py
+++ b/art/preprocessing/standardisation_mean_std/numpy.py
@@ -59,7 +59,11 @@ def __init__(
         self._broadcastable_mean = None
         self._broadcastable_std = None

-    def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None,) -> Tuple[np.ndarray, Optional[np.ndarray]]:
+    def __call__(
+        self,
+        x: np.ndarray,
+        y: Optional[np.ndarray] = None,
+    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
         """
         Apply StandardisationMeanStd inputs `x`.
diff --git a/art/utils.py b/art/utils.py
index 0f28450ae7..96152b561c 100644
--- a/art/utils.py
+++ b/art/utils.py
@@ -184,19 +184,28 @@
 REGRESSOR_TYPE = Union[ScikitlearnRegressor, ScikitlearnDecisionTreeRegressor]  # pylint: disable=C0103

 PYTORCH_ESTIMATOR_TYPE = Union[  # pylint: disable=C0103
-    PyTorchClassifier, PyTorchDeepSpeech, PyTorchEstimator, PyTorchObjectDetector, PyTorchFasterRCNN,
+    PyTorchClassifier,
+    PyTorchDeepSpeech,
+    PyTorchEstimator,
+    PyTorchObjectDetector,
+    PyTorchFasterRCNN,
 ]

 OBJECT_DETECTOR_TYPE = Union[  # pylint: disable=C0103
-    ObjectDetector, PyTorchObjectDetector, PyTorchFasterRCNN, TensorFlowFasterRCNN,
+    ObjectDetector,
+    PyTorchObjectDetector,
+    PyTorchFasterRCNN,
+    TensorFlowFasterRCNN,
 ]

 SPEECH_RECOGNIZER_TYPE = Union[  # pylint: disable=C0103
-    PyTorchDeepSpeech, TensorFlowLingvoASR,
+    PyTorchDeepSpeech,
+    TensorFlowLingvoASR,
 ]

 TENSORFLOWV2_ESTIMATOR_TYPE = Union[  # pylint: disable=C0103
-    TensorFlowV2Classifier, TensorFlowV2Estimator,
+    TensorFlowV2Classifier,
+    TensorFlowV2Estimator,
 ]

 # --------------------------------------------------------------------------------------------------------- DEPRECATION
@@ -248,7 +257,9 @@ def decorator(function):
         def wrapper(*args, **kwargs):
             warnings.simplefilter("always", category=DeprecationWarning)
             warnings.warn(
-                deprecated_msg + replaced_msg + reason_msg, category=DeprecationWarning, stacklevel=2,
+                deprecated_msg + replaced_msg + reason_msg,
+                category=DeprecationWarning,
+                stacklevel=2,
             )
             warnings.simplefilter("default", category=DeprecationWarning)
             return function(*args, **kwargs)
@@ -342,7 +353,8 @@ def projection(values: np.ndarray, eps: Union[int, float, np.ndarray], norm_p: U
             raise NotImplementedError("The parameter `eps` of type `np.ndarray` is not supported to use with norm 1.")

         values_tmp = values_tmp * np.expand_dims(
-            np.minimum(1.0, eps / (np.linalg.norm(values_tmp, axis=1, ord=1) + tol)), axis=1,
+            np.minimum(1.0, eps / (np.linalg.norm(values_tmp, axis=1, ord=1) + tol)),
+            axis=1,
         )

     elif norm_p in [np.inf, "inf"]:
@@ -363,7 +375,10 @@ def projection(values: np.ndarray, eps: Union[int, float, np.ndarray], norm_p: U


 def random_sphere(
-    nb_points: int, nb_dims: int, radius: Union[int, float, np.ndarray], norm: Union[int, float, str],
+    nb_points: int,
+    nb_dims: int,
+    radius: Union[int, float, np.ndarray],
+    norm: Union[int, float, str],
 ) -> np.ndarray:
     """
     Generate randomly `m x n`-dimension points with radius `radius` and centered around 0.
@@ -433,7 +448,9 @@ def original_to_tanh(


 def tanh_to_original(
-    x_tanh: np.ndarray, clip_min: Union[float, np.ndarray], clip_max: Union[float, np.ndarray],
+    x_tanh: np.ndarray,
+    clip_min: Union[float, np.ndarray],
+    clip_max: Union[float, np.ndarray],
 ) -> np.ndarray:
     """
     Transform input from tanh to original space.
@@ -584,7 +601,10 @@ def second_most_likely_class(x: np.ndarray, classifier: "CLASSIFIER_TYPE") -> np
     :param classifier: The classifier used for computing predictions.
     :return: Second most likely class predicted by `classifier` for sample `x` in one-hot encoding.
""" - return to_categorical(np.argpartition(classifier.predict(x), -2, axis=1)[:, -2], nb_classes=classifier.nb_classes,) + return to_categorical( + np.argpartition(classifier.predict(x), -2, axis=1)[:, -2], + nb_classes=classifier.nb_classes, + ) def get_label_conf(y_vec: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: @@ -705,7 +725,9 @@ def compute_accuracy(preds: np.ndarray, labels: np.ndarray, abstain: bool = True # -------------------------------------------------------------------------------------------------- DATASET OPERATIONS -def load_cifar10(raw: bool = False,) -> DATASET_TYPE: +def load_cifar10( + raw: bool = False, +) -> DATASET_TYPE: """ Loads CIFAR10 dataset from config.CIFAR10_PATH or downloads it if necessary. @@ -771,14 +793,20 @@ def load_batch(fpath: str) -> Tuple[np.ndarray, np.ndarray]: return (x_train, y_train), (x_test, y_test), min_, max_ -def load_mnist(raw: bool = False,) -> DATASET_TYPE: +def load_mnist( + raw: bool = False, +) -> DATASET_TYPE: """ Loads MNIST dataset from `config.ART_DATA_PATH` or downloads it if necessary. :param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to 1. :return: `(x_train, y_train), (x_test, y_test), min, max`. """ - path = get_file("mnist.npz", path=config.ART_DATA_PATH, url="https://s3.amazonaws.com/img-datasets/mnist.npz",) + path = get_file( + "mnist.npz", + path=config.ART_DATA_PATH, + url="https://s3.amazonaws.com/img-datasets/mnist.npz", + ) dict_mnist = np.load(path) x_train = dict_mnist["x_train"] @@ -863,16 +891,32 @@ def load_iris(raw: bool = False, test_set: float = 0.3) -> DATASET_TYPE: # Split training and test sets split_index = int((1 - test_set) * len(data) / 3) x_train = np.vstack((data[:split_index], data[50 : 50 + split_index], data[100 : 100 + split_index])) - y_train = np.vstack((labels[:split_index], labels[50 : 50 + split_index], labels[100 : 100 + split_index],)) + y_train = np.vstack( + ( + labels[:split_index], + labels[50 : 50 + split_index], + labels[100 : 100 + split_index], + ) + ) if split_index >= 49: x_test, y_test = None, None else: - x_test = np.vstack((data[split_index:50], data[50 + split_index : 100], data[100 + split_index :],)).astype( - np.float32 + x_test = np.vstack( + ( + data[split_index:50], + data[50 + split_index : 100], + data[100 + split_index :], + ) + ).astype(np.float32) + y_test = np.vstack( + ( + labels[split_index:50], + labels[50 + split_index : 100], + labels[100 + split_index :], + ) ) - y_test = np.vstack((labels[split_index:50], labels[50 + split_index : 100], labels[100 + split_index :],)) assert len(x_train) + len(x_test) == 150 # Shuffle test set @@ -1018,7 +1062,9 @@ def modify_social(value): return (x_train, y_train), (x_test, y_test), min_, max_ -def load_dataset(name: str,) -> DATASET_TYPE: +def load_dataset( + name: str, +) -> DATASET_TYPE: """ Loads or downloads the dataset corresponding to `name`. Options are: `mnist`, `cifar10`, `stl10`, `iris`, `nursery` and `diabetes`. @@ -1186,7 +1232,10 @@ def clip_and_round(x: np.ndarray, clip_values: Optional["CLIP_VALUES_TYPE"], rou def preprocess( - x: np.ndarray, y: np.ndarray, nb_classes: int = 10, clip_values: Optional["CLIP_VALUES_TYPE"] = None, + x: np.ndarray, + y: np.ndarray, + nb_classes: int = 10, + clip_values: Optional["CLIP_VALUES_TYPE"] = None, ) -> Tuple[np.ndarray, np.ndarray]: """ Scales `x` to [0, 1] and converts `y` to class categorical confidences. 
diff --git a/art/visualization.py b/art/visualization.py
index 386f342cee..9e95597e15 100644
--- a/art/visualization.py
+++ b/art/visualization.py
@@ -108,7 +108,11 @@ def save_image(image_array: np.ndarray, f_name: str) -> None:


 def plot_3d(
-    points: np.ndarray, labels: List[int], colors: Optional[List[str]] = None, save: bool = True, f_name: str = "",
+    points: np.ndarray,
+    labels: List[int],
+    colors: Optional[List[str]] = None,
+    save: bool = True,
+    f_name: str = "",
 ) -> "matplotlib.figure.Figure":  # pragma: no cover
     """
     Generates a 3-D plot in of the provided points where the labels define the color that will be used to color each
diff --git a/tests/attacks/evasion/test_auto_attack.py b/tests/attacks/evasion/test_auto_attack.py
index b32638e111..036b2ce9d7 100644
--- a/tests/attacks/evasion/test_auto_attack.py
+++ b/tests/attacks/evasion/test_auto_attack.py
@@ -47,7 +47,13 @@ def test_generate_default(art_warning, fix_get_mnist_subset, image_dl_estimator)
     classifier, _ = image_dl_estimator(from_logits=True)

     attack = AutoAttack(
-        estimator=classifier, norm=np.inf, eps=0.3, eps_step=0.1, attacks=None, batch_size=32, estimator_orig=None,
+        estimator=classifier,
+        norm=np.inf,
+        eps=0.3,
+        eps_step=0.1,
+        attacks=None,
+        batch_size=32,
+        estimator_orig=None,
     )

     (x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset
diff --git a/tests/attacks/evasion/test_dpatch.py b/tests/attacks/evasion/test_dpatch.py
index 01b41a6baa..4b30cf4912 100644
--- a/tests/attacks/evasion/test_dpatch.py
+++ b/tests/attacks/evasion/test_dpatch.py
@@ -44,7 +44,14 @@ def test_generate(art_warning, fix_get_mnist_subset, fix_get_rcnn):
     (_, _, x_test_mnist, y_test_mnist) = fix_get_mnist_subset
     frcnn = fix_get_rcnn

-    attack = DPatch(frcnn, patch_shape=(4, 4, 1), learning_rate=1.0, max_iter=1, batch_size=1, verbose=False,)
+    attack = DPatch(
+        frcnn,
+        patch_shape=(4, 4, 1),
+        learning_rate=1.0,
+        max_iter=1,
+        batch_size=1,
+        verbose=False,
+    )
     patch = attack.generate(x=x_test_mnist[[0]])

     assert patch.shape == (4, 4, 1)
diff --git a/tests/attacks/evasion/test_imperceptible_asr_pytorch.py b/tests/attacks/evasion/test_imperceptible_asr_pytorch.py
index 3b4c566032..60f052bee7 100644
--- a/tests/attacks/evasion/test_imperceptible_asr_pytorch.py
+++ b/tests/attacks/evasion/test_imperceptible_asr_pytorch.py
@@ -145,7 +145,10 @@ def test_check_params(art_warning):
         from art.attacks.evasion.imperceptible_asr.imperceptible_asr_pytorch import ImperceptibleASRPyTorch

         speech_recognizer = PyTorchDeepSpeech(
-            pretrained_model="librispeech", device_type="cpu", use_amp=False, preprocessing_defences=None,
+            pretrained_model="librispeech",
+            device_type="cpu",
+            use_amp=False,
+            preprocessing_defences=None,
         )

         with pytest.raises(ValueError):
diff --git a/tests/attacks/evasion/test_pe_malware_attack.py b/tests/attacks/evasion/test_pe_malware_attack.py
index e7aeb840fe..c4fcf71e1e 100644
--- a/tests/attacks/evasion/test_pe_malware_attack.py
+++ b/tests/attacks/evasion/test_pe_malware_attack.py
@@ -62,7 +62,12 @@ def get_prediction_model(param_dic):
     Needs to have the same structure as the target model. Populated here with "standard" parameters.
""" - inp = tf.keras.layers.Input(shape=(param_dic["maxlen"], param_dic["embedding_size"],)) + inp = tf.keras.layers.Input( + shape=( + param_dic["maxlen"], + param_dic["embedding_size"], + ) + ) filt = tf.keras.layers.Conv1D( filters=128, kernel_size=500, diff --git a/tests/classifiersFrameworks/test_pytorch.py b/tests/classifiersFrameworks/test_pytorch.py index 902988d111..d31c06a1ae 100644 --- a/tests/classifiersFrameworks/test_pytorch.py +++ b/tests/classifiersFrameworks/test_pytorch.py @@ -261,7 +261,11 @@ def forward(self, x): model.to(device) opt = optim.Adam(model.parameters(), lr=0.001) classifier = PyTorchClassifier( - model=model, loss=loss_func, optimizer=opt, input_shape=(1, 28, 28), nb_classes=2, + model=model, + loss=loss_func, + optimizer=opt, + input_shape=(1, 28, 28), + nb_classes=2, ) classifier.fit(train_x, train_y, batch_size=64, nb_epochs=3) test_x_batch = test_x[0:16] diff --git a/tests/defences/detector/poison/test_provenance_defence.py b/tests/defences/detector/poison/test_provenance_defence.py index e6d388c686..bcf03cfc10 100644 --- a/tests/defences/detector/poison/test_provenance_defence.py +++ b/tests/defences/detector/poison/test_provenance_defence.py @@ -123,7 +123,13 @@ def setUpClass(cls): cls.classifier.fit(all_data, all_labels) cls.defence_trust = ProvenanceDefense( - cls.classifier, all_data, all_labels, all_p, x_val=trusted_data, y_val=trusted_labels, eps=0.1, + cls.classifier, + all_data, + all_labels, + all_p, + x_val=trusted_data, + y_val=trusted_labels, + eps=0.1, ) cls.defence_no_trust = ProvenanceDefense(cls.classifier, all_data, all_labels, all_p, eps=0.1) @@ -141,17 +147,28 @@ def test_wrong_parameters_2(self): def test_wrong_parameters_3(self): (all_data, _, _), (_, y_test), (_, _), (_, _), (_, _) = self.mnist self.assertRaises( - ValueError, self.defence_no_trust.set_params, x_train=-all_data, y_train=y_test, + ValueError, + self.defence_no_trust.set_params, + x_train=-all_data, + y_train=y_test, ) self.assertRaises(ValueError, self.defence_trust.set_params, x_train=-all_data, y_train=y_test) def test_wrong_parameters_4(self): (_, _, p_train), (x_test, y_test), (_, _), (_, _), (_, _) = self.mnist self.assertRaises( - ValueError, self.defence_no_trust.set_params, x_train=-x_test, y_train=y_test, p_train=p_train, + ValueError, + self.defence_no_trust.set_params, + x_train=-x_test, + y_train=y_test, + p_train=p_train, ) self.assertRaises( - ValueError, self.defence_trust.set_params, x_train=-x_test, y_train=y_test, p_train=p_train, + ValueError, + self.defence_trust.set_params, + x_train=-x_test, + y_train=y_test, + p_train=p_train, ) def test_detect_poison(self): diff --git a/tests/defences/detector/poison/test_roni.py b/tests/defences/detector/poison/test_roni.py index 0f062fcf98..5510a6e5a0 100644 --- a/tests/defences/detector/poison/test_roni.py +++ b/tests/defences/detector/poison/test_roni.py @@ -113,10 +113,22 @@ def setUpClass(cls): cls.classifier.fit(all_data, all_labels) cls.defense_cal = RONIDefense( - cls.classifier, all_data, all_labels, trusted_data, trusted_labels, eps=0.1, calibrated=True, + cls.classifier, + all_data, + all_labels, + trusted_data, + trusted_labels, + eps=0.1, + calibrated=True, ) cls.defence_no_cal = RONIDefense( - cls.classifier, all_data, all_labels, trusted_data, trusted_labels, eps=0.1, calibrated=False, + cls.classifier, + all_data, + all_labels, + trusted_data, + trusted_labels, + eps=0.1, + calibrated=False, ) def setUp(self): @@ -129,7 +141,10 @@ def test_wrong_parameters_1(self): def 
test_wrong_parameters_2(self): (all_data, _), (_, y_test), (_, _), (_, _), (_, _) = self.mnist self.assertRaises( - ValueError, self.defence_no_cal.set_params, x_train=-all_data, y_train=y_test, + ValueError, + self.defence_no_cal.set_params, + x_train=-all_data, + y_train=y_test, ) self.assertRaises(ValueError, self.defense_cal.set_params, x_train=-all_data, y_train=y_test) diff --git a/tests/defences/preprocessor/test_inverse_gan.py b/tests/defences/preprocessor/test_inverse_gan.py index a8edc0d7a6..c1e4761032 100644 --- a/tests/defences/preprocessor/test_inverse_gan.py +++ b/tests/defences/preprocessor/test_inverse_gan.py @@ -56,7 +56,9 @@ def test_inverse_gan(art_warning, fix_get_mnist_subset, image_dl_estimator_for_a x_test_defended = inverse_gan(x_test_adv, maxiter=1) np.testing.assert_array_almost_equal( - float(np.mean(x_test_defended - x_test_adv)), 0.08818667382001877, decimal=0.01, + float(np.mean(x_test_defended - x_test_adv)), + 0.08818667382001877, + decimal=0.01, ) except ARTTestException as e: art_warning(e) diff --git a/tests/defences/trainer/test_adversarial_trainer_FBF.py b/tests/defences/trainer/test_adversarial_trainer_FBF.py index 01ebfcb318..40653a926a 100644 --- a/tests/defences/trainer/test_adversarial_trainer_FBF.py +++ b/tests/defences/trainer/test_adversarial_trainer_FBF.py @@ -69,7 +69,9 @@ def test_adversarial_trainer_fbf_pytorch_fit_and_predict(get_adv_trainer, fix_ge accuracy_new = np.sum(predictions_new == np.argmax(y_test_mnist, axis=1)) / x_test_mnist.shape[0] np.testing.assert_array_almost_equal( - float(np.mean(x_test_mnist_original - x_test_mnist)), 0.0, decimal=4, + float(np.mean(x_test_mnist_original - x_test_mnist)), + 0.0, + decimal=4, ) assert accuracy == 0.32 @@ -100,7 +102,9 @@ def test_adversarial_trainer_fbf_pytorch_fit_generator_and_predict( accuracy_new = np.sum(predictions_new == np.argmax(y_test_mnist, axis=1)) / x_test_mnist.shape[0] np.testing.assert_array_almost_equal( - float(np.mean(x_test_mnist_original - x_test_mnist)), 0.0, decimal=4, + float(np.mean(x_test_mnist_original - x_test_mnist)), + 0.0, + decimal=4, ) assert accuracy == 0.32 diff --git a/tests/estimators/certification/test_randomized_smoothing.py b/tests/estimators/certification/test_randomized_smoothing.py index 15869fba27..9d37d735a4 100644 --- a/tests/estimators/certification/test_randomized_smoothing.py +++ b/tests/estimators/certification/test_randomized_smoothing.py @@ -82,7 +82,12 @@ def test_3_kr(self): x_test_adv = fgsm.generate(x_test, **params) # Initialize RS object and attack with FGSM - rs = NumpyRandomizedSmoothing(classifier=classifier, sample_size=100, scale=0.01, alpha=0.001,) + rs = NumpyRandomizedSmoothing( + classifier=classifier, + sample_size=100, + scale=0.01, + alpha=0.001, + ) fgsm_with_rs = FastGradientMethod(estimator=rs, targeted=True) x_test_adv_with_rs = fgsm_with_rs.generate(x_test, **params) diff --git a/tests/estimators/classification/test_deeplearning_common.py b/tests/estimators/classification/test_deeplearning_common.py index fd133311d8..5045e86b38 100644 --- a/tests/estimators/classification/test_deeplearning_common.py +++ b/tests/estimators/classification/test_deeplearning_common.py @@ -313,7 +313,9 @@ def test_loss_gradient( sub_gradients = gradients[0, :, 14, 0] np.testing.assert_array_almost_equal( - sub_gradients, expected_gradients_1[0], decimal=expected_gradients_1[1], + sub_gradients, + expected_gradients_1[0], + decimal=expected_gradients_1[1], ) if mnist_shape[0] == 1: @@ -322,7 +324,9 @@ def test_loss_gradient( 
             sub_gradients = gradients[0, 14, :, 0]

         np.testing.assert_array_almost_equal(
-            sub_gradients, expected_gradients_2[0], decimal=expected_gradients_2[1],
+            sub_gradients,
+            expected_gradients_2[0],
+            decimal=expected_gradients_2[1],
         )
     except ARTTestException as e:
         art_warning(e)
@@ -453,48 +457,71 @@ def get_gradient4_column(gradients):

         # Test all gradients label
         gradients = classifier.class_gradient(x_test_mnist)
-        new_shape = (x_test_mnist.shape[0], 10,) + mnist_shape
+        new_shape = (
+            x_test_mnist.shape[0],
+            10,
+        ) + mnist_shape
         assert gradients.shape == new_shape

         sub_gradients2 = get_gradient2_column(gradients)

         if framework != "mxnet":
             np.testing.assert_array_almost_equal(
-                sub_gradients2, grad_2_all_labels[0], decimal=4,
+                sub_gradients2,
+                grad_2_all_labels[0],
+                decimal=4,
             )

         # Test 1 gradient label = 5
         gradients = classifier.class_gradient(x_test_mnist, label=5)
-        assert gradients.shape == (x_test_mnist.shape[0], 1,) + mnist_shape
+        assert (
+            gradients.shape
+            == (
+                x_test_mnist.shape[0],
+                1,
+            )
+            + mnist_shape
+        )

         sub_gradients2 = get_gradient3_column(gradients)

         if framework != "mxnet":
             np.testing.assert_array_almost_equal(
-                sub_gradients2, grad_1_label5[0], decimal=4,
+                sub_gradients2,
+                grad_1_label5[0],
+                decimal=4,
             )

         sub_gradients4 = get_gradient4_column(gradients)

         if framework != "mxnet":
             np.testing.assert_array_almost_equal(
-                sub_gradients4, grad_2_label5[0], decimal=4,
+                sub_gradients4,
+                grad_2_label5[0],
+                decimal=4,
             )

         # # Test a set of gradients label = array
         gradients = classifier.class_gradient(x_test_mnist, label=labels)
-        new_shape = (x_test_mnist.shape[0], 1,) + mnist_shape
+        new_shape = (
+            x_test_mnist.shape[0],
+            1,
+        ) + mnist_shape
        assert gradients.shape == new_shape

         sub_gradients5 = get_gradient3_column(gradients)

         if framework != "mxnet":
             np.testing.assert_array_almost_equal(
-                sub_gradients5, grad_1_labelArray[0], decimal=4,
+                sub_gradients5,
+                grad_1_labelArray[0],
+                decimal=4,
             )

         sub_gradients6 = get_gradient4_column(gradients)

         if framework != "mxnet":
             np.testing.assert_array_almost_equal(
-                sub_gradients6, grad_2_labelArray[0], decimal=4,
+                sub_gradients6,
+                grad_2_labelArray[0],
+                decimal=4,
             )

     except ARTTestException as e:
@@ -525,7 +552,8 @@ def test_compute_loss(

 @pytest.mark.skip_framework("keras", "kerastf", "mxnet", "non_dl_frameworks")
 def test_clone_for_refitting(
-    art_warning, image_dl_estimator,
+    art_warning,
+    image_dl_estimator,
 ):
     try:
         classifier, _ = image_dl_estimator(functional=True)
diff --git a/tests/estimators/classification/test_deeplearning_specific.py b/tests/estimators/classification/test_deeplearning_specific.py
index 5ad33003fb..69e5c2ed89 100644
--- a/tests/estimators/classification/test_deeplearning_specific.py
+++ b/tests/estimators/classification/test_deeplearning_specific.py
@@ -150,7 +150,12 @@ def test_pickle(art_warning, get_default_mnist_subset, image_dl_estimator):
 @pytest.mark.skip_framework("tensorflow", "tensorflow2v1", "keras", "kerastf", "mxnet", "non_dl_frameworks")
 @pytest.mark.parametrize("device_type", ["cpu", "gpu"])
 def test_loss_gradient_amp(
-    art_warning, get_default_mnist_subset, image_dl_estimator, expected_values, mnist_shape, device_type,
+    art_warning,
+    get_default_mnist_subset,
+    image_dl_estimator,
+    expected_values,
+    mnist_shape,
+    device_type,
 ):
     import torch
     import torch.nn as nn
@@ -191,14 +196,18 @@ def test_loss_gradient_amp(
     sub_gradients = gradients[0, 0, :, 14]

     np.testing.assert_array_almost_equal(
-        sub_gradients, expected_gradients_1, decimal=4,
+        sub_gradients,
+        expected_gradients_1,
+        decimal=4,
     )

     # Second test of gradients
     sub_gradients = gradients[0, 0, 14, :]

     np.testing.assert_array_almost_equal(
-        sub_gradients, expected_gradients_2, decimal=4,
+        sub_gradients,
+        expected_gradients_2,
+        decimal=4,
     )

     # Compute loss gradients with framework
@@ -214,14 +223,18 @@ def test_loss_gradient_amp(
     sub_gradients = gradients[0, 0, :, 14]

     np.testing.assert_array_almost_equal(
-        sub_gradients, expected_gradients_1, decimal=4,
+        sub_gradients,
+        expected_gradients_1,
+        decimal=4,
     )

     # Second test of gradients
     sub_gradients = gradients[0, 0, 14, :]

     np.testing.assert_array_almost_equal(
-        sub_gradients, expected_gradients_2, decimal=4,
+        sub_gradients,
+        expected_gradients_2,
+        decimal=4,
     )

     except ARTTestException as e:
@@ -230,7 +243,8 @@ def test_loss_gradient_amp(

 @pytest.mark.skip_framework("tensorflow2", "tensorflow2v1", "keras", "kerastf", "mxnet", "non_dl_frameworks")
 def test_tensorflow_1_state(
-    art_warning, image_dl_estimator,
+    art_warning,
+    image_dl_estimator,
 ):
     try:
         classifier, _ = image_dl_estimator(from_logits=True)
diff --git a/tests/estimators/classification/test_scikitlearn.py b/tests/estimators/classification/test_scikitlearn.py
index 2a26cc2c93..3d4f8d08cd 100644
--- a/tests/estimators/classification/test_scikitlearn.py
+++ b/tests/estimators/classification/test_scikitlearn.py
@@ -361,7 +361,9 @@ def setUpClass(cls):
         x_train_binary = cls.x_train_iris[
             binary_class_index,
         ]
-        y_train_binary = cls.y_train_iris[binary_class_index,][:, [0, 1]]
+        y_train_binary = cls.y_train_iris[
+            binary_class_index,
+        ][:, [0, 1]]

         cls.sklearn_model = LogisticRegression(
             verbose=0, C=1, solver="newton-cg", dual=False, fit_intercept=True, multi_class="ovr"
@@ -386,7 +388,9 @@ def test_loss_gradient(self):
         x_test_binary = self.x_test_iris[
             binary_class_index,
         ]
-        y_test_binary = self.y_test_iris[binary_class_index,][:, [0, 1]]
+        y_test_binary = self.y_test_iris[
+            binary_class_index,
+        ][:, [0, 1]]

         grad_predicted = self.classifier.loss_gradient(x_test_binary[0:1], y_test_binary[0:1])
         grad_expected = np.asarray([[-0.3771413, 0.31875887, -1.18806318, -0.46225301]])
diff --git a/tests/estimators/object_detection/test_pytorch_faster_rcnn.py b/tests/estimators/object_detection/test_pytorch_faster_rcnn.py
index 8a37aef458..dcd124892f 100644
--- a/tests/estimators/object_detection/test_pytorch_faster_rcnn.py
+++ b/tests/estimators/object_detection/test_pytorch_faster_rcnn.py
@@ -56,15 +56,28 @@ def setUpClass(cls):
         result = cls.obj_detect.predict(x=cls.x_test)

         cls.y_test = [
-            {"boxes": result[0]["boxes"], "labels": result[0]["labels"], "scores": np.ones_like(result[0]["labels"]),},
-            {"boxes": result[1]["boxes"], "labels": result[1]["labels"], "scores": np.ones_like(result[1]["labels"]),},
+            {
+                "boxes": result[0]["boxes"],
+                "labels": result[0]["labels"],
+                "scores": np.ones_like(result[0]["labels"]),
+            },
+            {
+                "boxes": result[1]["boxes"],
+                "labels": result[1]["labels"],
+                "scores": np.ones_like(result[1]["labels"]),
+            },
         ]

     def test_predict(self):
         result = self.obj_detect.predict(self.x_test_mnist.astype(np.float32))

         self.assertTrue(
-            list(result[0].keys()) == ["boxes", "labels", "scores",]
+            list(result[0].keys())
+            == [
+                "boxes",
+                "labels",
+                "scores",
+            ]
         )

         self.assertTrue(result[0]["boxes"].shape == (7, 4))
@@ -204,8 +217,16 @@ def test_preprocessing_defences(self):
         result = frcnn.predict(x=self.x_test)

         y = [
-            {"boxes": result[0]["boxes"], "labels": result[0]["labels"], "scores": np.ones_like(result[0]["labels"]),},
-            {"boxes": result[1]["boxes"], "labels": result[1]["labels"], "scores": np.ones_like(result[1]["labels"]),},
+            {
+                "boxes": result[0]["boxes"],
+                "labels": result[0]["labels"],
+                "scores": np.ones_like(result[0]["labels"]),
+            },
+            {
+                "boxes": result[1]["boxes"],
+                "labels": result[1]["labels"],
+                "scores": np.ones_like(result[1]["labels"]),
+            },
         ]

         # Compute gradients
@@ -233,8 +254,16 @@ def test_compute_loss(self):
         result = frcnn.predict(np.repeat(self.x_test_mnist[:2].astype(np.float32), repeats=3, axis=3))

         y = [
-            {"boxes": result[0]["boxes"], "labels": result[0]["labels"], "scores": np.ones_like(result[0]["labels"]),},
-            {"boxes": result[1]["boxes"], "labels": result[1]["labels"], "scores": np.ones_like(result[1]["labels"]),},
+            {
+                "boxes": result[0]["boxes"],
+                "labels": result[0]["labels"],
+                "scores": np.ones_like(result[0]["labels"]),
+            },
+            {
+                "boxes": result[1]["boxes"],
+                "labels": result[1]["labels"],
+                "scores": np.ones_like(result[1]["labels"]),
+            },
         ]

         # Compute loss
diff --git a/tests/estimators/object_detection/test_pytorch_object_detector.py b/tests/estimators/object_detection/test_pytorch_object_detector.py
index ade8764c5b..03b935c618 100644
--- a/tests/estimators/object_detection/test_pytorch_object_detector.py
+++ b/tests/estimators/object_detection/test_pytorch_object_detector.py
@@ -70,7 +70,12 @@ def test_predict_1(self):
         result = self.obj_detect_1.predict(self.x_test_mnist.astype(np.float32))

         self.assertTrue(
-            list(result[0].keys()) == ["boxes", "labels", "scores",]
+            list(result[0].keys())
+            == [
+                "boxes",
+                "labels",
+                "scores",
+            ]
         )

         self.assertTrue(result[0]["boxes"].shape == (7, 4))
@@ -91,7 +96,13 @@ def test_predict_2(self):
         result = self.obj_detect_2.predict(self.x_test_mnist.astype(np.float32))

         self.assertTrue(
-            list(result[0].keys()) == ["boxes", "labels", "scores", "masks",]
+            list(result[0].keys())
+            == [
+                "boxes",
+                "labels",
+                "scores",
+                "masks",
+            ]
         )

         self.assertTrue(result[0]["boxes"].shape == (4, 4))
@@ -111,8 +122,16 @@ def test_loss_gradient_1(self):
         result = self.obj_detect_1.predict(np.repeat(self.x_test_mnist[:2].astype(np.float32), repeats=3, axis=3))

         y = [
-            {"boxes": result[0]["boxes"], "labels": result[0]["labels"], "scores": np.ones_like(result[0]["labels"]),},
-            {"boxes": result[1]["boxes"], "labels": result[1]["labels"], "scores": np.ones_like(result[1]["labels"]),},
+            {
+                "boxes": result[0]["boxes"],
+                "labels": result[0]["labels"],
+                "scores": np.ones_like(result[0]["labels"]),
+            },
+            {
+                "boxes": result[1]["boxes"],
+                "labels": result[1]["labels"],
+                "scores": np.ones_like(result[1]["labels"]),
+            },
         ]

         # Compute gradients
diff --git a/tests/estimators/object_detection/test_tensorflow_faster_rcnn.py b/tests/estimators/object_detection/test_tensorflow_faster_rcnn.py
index 50a3c2e4a2..62b5c48f4e 100644
--- a/tests/estimators/object_detection/test_tensorflow_faster_rcnn.py
+++ b/tests/estimators/object_detection/test_tensorflow_faster_rcnn.py
@@ -259,8 +259,16 @@ def test_compute_losses(art_warning, get_mnist_dataset):
         result = frcnn.predict(np.repeat(x_test_mnist[:2].astype(np.float32), repeats=3, axis=3))

         y = [
-            {"boxes": result[0]["boxes"], "labels": result[0]["labels"], "scores": np.ones_like(result[0]["labels"]),},
-            {"boxes": result[1]["boxes"], "labels": result[1]["labels"], "scores": np.ones_like(result[1]["labels"]),},
+            {
+                "boxes": result[0]["boxes"],
+                "labels": result[0]["labels"],
+                "scores": np.ones_like(result[0]["labels"]),
+            },
+            {
+                "boxes": result[1]["boxes"],
+                "labels": result[1]["labels"],
+                "scores": np.ones_like(result[1]["labels"]),
+            },
         ]

         # Compute losses
@@ -296,8 +304,16 @@ def test_compute_loss(art_warning, get_mnist_dataset):
         result = frcnn.predict(np.repeat(x_test_mnist[:2].astype(np.float32), repeats=3, axis=3))

         y = [
-            {"boxes": result[0]["boxes"], "labels": result[0]["labels"], "scores": np.ones_like(result[0]["labels"]),},
-            {"boxes": result[1]["boxes"], "labels": result[1]["labels"], "scores": np.ones_like(result[1]["labels"]),},
+            {
+                "boxes": result[0]["boxes"],
+                "labels": result[0]["labels"],
+                "scores": np.ones_like(result[0]["labels"]),
+            },
+            {
+                "boxes": result[1]["boxes"],
+                "labels": result[1]["labels"],
+                "scores": np.ones_like(result[1]["labels"]),
+            },
         ]

         # Compute loss
diff --git a/tests/preprocessing/audio/test_l_filter_pytorch.py b/tests/preprocessing/audio/test_l_filter_pytorch.py
index 71b573ba99..fdc1116482 100644
--- a/tests/preprocessing/audio/test_l_filter_pytorch.py
+++ b/tests/preprocessing/audio/test_l_filter_pytorch.py
@@ -168,7 +168,8 @@ def test_check_params(art_warning):

         with pytest.raises(ValueError):
             _ = LFilterPyTorch(
-                numerator_coef=np.array([0.1, 0.2, -0.1, -0.2]), denominator_coef=np.array([1.0, 0.1, 0.3, 0.4, 0.2]),
+                numerator_coef=np.array([0.1, 0.2, -0.1, -0.2]),
+                denominator_coef=np.array([1.0, 0.1, 0.3, 0.4, 0.2]),
             )

         with pytest.raises(ValueError):
diff --git a/tests/utils.py b/tests/utils.py
index 72e99b70f5..636a3c69aa 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -1033,7 +1033,12 @@ def get_image_classifier_pt_functional():
     optimizer = optim.Adam(model.parameters(), lr=0.01)

     classifier = PyTorchClassifier(
-        model=model, clip_values=(0, 1), loss=criterion, optimizer=optimizer, input_shape=(1, 28, 28), nb_classes=10,
+        model=model,
+        clip_values=(0, 1),
+        loss=criterion,
+        optimizer=optimizer,
+        input_shape=(1, 28, 28),
+        nb_classes=10,
     )
     return classifier

@@ -1141,9 +1146,17 @@ def get_gan_inverse_gan_ft():
     sess = tf.Session()
     sess.run(tf.global_variables_initializer())

-    gan = TensorFlowGenerator(input_ph=z_ph, model=gen_tf, sess=sess,)
+    gan = TensorFlowGenerator(
+        input_ph=z_ph,
+        model=gen_tf,
+        sess=sess,
+    )

-    inverse_gan = TensorFlowEncoder(input_ph=image_to_enc_ph, model=enc_tf, sess=sess,)
+    inverse_gan = TensorFlowEncoder(
+        input_ph=image_to_enc_ph,
+        model=enc_tf,
+        sess=sess,
+    )

     return gan, inverse_gan, sess