"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art import NUMPY_DTYPE
from art.classifiers.classifier import ClassifierGradients
from art.attacks.attack import Attack
from art.utils import compute_success, get_labels_np_array, random_sphere, projection, check_and_transform_label_format
logger = logging.getLogger(__name__)
class FastGradientMethod(Attack):
"""
This attack was originally implemented by Goodfellow et al. (2015) with the infinity norm (and is known as the "Fast
Gradient Sign Method"). This implementation extends the attack to other norms, and is therefore called the Fast
Gradient Method.
| Paper link: https://arxiv.org/abs/1412.6572
"""
attack_params = Attack.attack_params + ['norm', 'eps', 'eps_step', 'targeted', 'num_random_init', 'batch_size',
'minimal']
def __init__(self, classifier, norm=np.inf, eps=.3, eps_step=0.1, targeted=False, num_random_init=0, batch_size=1,
minimal=False):
"""
Create a :class:`.FastGradientMethod` instance.
:param classifier: A trained classifier.
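# --- Illustrative usage sketch (not part of the ART source above). Assumes a
# trained ART classifier wrapper `classifier` and an input batch `x_test`, both
# placeholder names; import paths vary across ART versions. ---
import numpy as np
from art.attacks import FastGradientMethod

attack = FastGradientMethod(classifier, norm=np.inf, eps=0.1)
x_adv = attack.generate(x=x_test)  # untargeted L_inf adversarial examples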
import logging
from art.config import ART_NUMPY_DTYPE
from art.attacks.attack import Attack
from art.classifiers.classifier import Classifier
from art.utils import to_categorical
logger = logging.getLogger(__name__)
class CopycatCNN(Attack):
"""
Implementation of the Copycat CNN attack from Correia-Silva et al. (2018).
| Paper link: https://arxiv.org/abs/1806.05476
"""
attack_params = Attack.attack_params + ['batch_size', 'nb_epochs', 'nb_stolen']
def __init__(self, classifier, batch_size=1, nb_epochs=10, nb_stolen=1):
"""
Create a Copycat CNN attack instance.
:param classifier: A victim classifier.
:type classifier: :class:`.Classifier`
:param batch_size: Size of batches.
:type batch_size: `int`
:param nb_epochs: Number of epochs to use for training.
:type nb_epochs: `int`
:param nb_stolen: Number of queries submitted to the victim classifier to steal it.
:type nb_stolen: `int`
"""
super(CopycatCNN, self).__init__(classifier=classifier)
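# --- Illustrative usage sketch, not part of the source above. `victim` (the
# classifier being stolen), `thieved` (a randomly initialised model of matching
# input/output shape) and `x_steal` (unlabeled query data) are placeholders.
# The extraction entry point differs between ART versions; recent releases
# expose `extract`, so check yours. ---
from art.attacks import CopycatCNN

attack = CopycatCNN(victim, batch_size=64, nb_epochs=10, nb_stolen=1000)
stolen = attack.extract(x_steal, thieved_classifier=thieved)  # recent-ART API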
"""
This module implements the boundary attack.
| Paper link: https://arxiv.org/abs/1712.04248
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art import NUMPY_DTYPE
from art.attacks.attack import Attack
from art.utils import compute_success, to_categorical, check_and_transform_label_format
logger = logging.getLogger(__name__)
class BoundaryAttack(Attack):
"""
Implementation of the boundary attack from Brendel et al. (2018). This is a powerful black-box attack that
only requires final class predictions.
| Paper link: https://arxiv.org/abs/1712.04248
"""
attack_params = Attack.attack_params + ['targeted', 'delta', 'epsilon', 'step_adapt', 'max_iter', 'num_trial',
'sample_size', 'init_size', 'batch_size']
def __init__(self, classifier, targeted=True, delta=0.01, epsilon=0.01, step_adapt=0.667, max_iter=5000,
num_trial=25, sample_size=20, init_size=100):
"""
Create a boundary attack instance.
:param classifier: A trained classifier.
:type classifier: :class:`.Classifier`
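# --- Illustrative usage sketch, not part of the source above; `classifier` and
# `x_test` are placeholders. The attack is decision-based, so it needs only the
# victim's predicted labels; `targeted=False` avoids supplying target labels. ---
from art.attacks import BoundaryAttack

attack = BoundaryAttack(classifier, targeted=False, max_iter=1000)
x_adv = attack.generate(x=x_test)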
import logging
import random
import numpy as np
from scipy.ndimage import rotate, shift, zoom
from art import NUMPY_DTYPE
from art.classifiers.classifier import ClassifierNeuralNetwork, ClassifierGradients
from art.attacks.attack import Attack
from art.utils import check_and_transform_label_format
logger = logging.getLogger(__name__)
class AdversarialPatch(Attack):
"""
Implementation of the adversarial patch attack.
| Paper link: https://arxiv.org/abs/1712.09665
"""
attack_params = Attack.attack_params + ["target", "rotation_max", "scale_min", "scale_max", "learning_rate",
"max_iter", "batch_size", "clip_patch"]
def __init__(self, classifier, target=0, rotation_max=22.5, scale_min=0.1, scale_max=1.0, learning_rate=5.0,
max_iter=500, clip_patch=None, batch_size=16):
"""
Create an instance of the :class:`.AdversarialPatch`.
:param classifier: A trained classifier.
:type classifier: :class:`.Classifier`
:param target: The target label for the created patch.
:type target: `int`
:param rotation_max: The maximum rotation applied to random patches. The value is expected to be in the
range `[0, 180]`.
:type rotation_max: `float`
:param scale_min: The minimum scaling applied to random patches. The value should be in the range `[0, 1]`,
but less than `scale_max`.
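# --- Illustrative usage sketch, not part of the source above; `classifier`,
# `x_train` and `y_train` are placeholders. In the ART releases this snippet
# matches, `generate` returns the patch and its circular mask, and
# `apply_patch` pastes the patch into images at a given scale; verify both
# against your version. ---
from art.attacks import AdversarialPatch

attack = AdversarialPatch(classifier, target=0, max_iter=500, batch_size=16)
patch, patch_mask = attack.generate(x=x_train, y=y_train)
x_patched = attack.apply_patch(x=x_train, scale=0.5)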
import logging
import numpy as np
from art.attacks.attack import Attack
logger = logging.getLogger(__name__)
class UniversalPerturbation(Attack):
"""
Implementation of the attack from Moosavi-Dezfooli et al. (2016). Computes a fixed perturbation to be applied to
all future inputs. To this end, it can use any adversarial attack method.
| Paper link: https://arxiv.org/abs/1610.08401
"""
attacks_dict = {'carlini': 'art.attacks.carlini.CarliniL2Method',
'deepfool': 'art.attacks.deepfool.DeepFool',
'fgsm': 'art.attacks.fast_gradient.FastGradientMethod',
'newtonfool': 'art.attacks.newtonfool.NewtonFool',
'jsma': 'art.attacks.saliency_map.SaliencyMapMethod',
'vat': 'art.attacks.virtual_adversarial.VirtualAdversarialMethod'
}
attack_params = Attack.attack_params + ['attacker', 'attacker_params', 'delta', 'max_iter', 'eps', 'norm']
def __init__(self, classifier, attacker='deepfool', attacker_params=None, delta=0.2, max_iter=20, eps=10.0,
norm=np.inf):
"""
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param attacker: Adversarial attack name. Default is 'deepfool'. Supported names: 'carlini', 'deepfool', 'fgsm',
'newtonfool', 'jsma', 'vat'.
:type attacker: `str`
:param attacker_params: Parameters specific to the adversarial attack.
:type attacker_params: `dict`
:param delta: Desired accuracy: the attack stops early once the fooling rate on the inputs reaches `1 - delta`.
:type delta: `float`
:param max_iter: The maximum number of iterations for computing universal perturbation.
:type max_iter: `int`
:param eps: Attack step size (input variation)
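# --- Illustrative usage sketch, not part of the source above; `classifier` and
# `x_train` are placeholders. The attack reruns the chosen base attack (here
# 'deepfool') until the fooling rate on `x_train` reaches `1 - delta` or
# `max_iter` is exhausted, yielding one perturbation shared by all inputs. ---
from art.attacks import UniversalPerturbation

attack = UniversalPerturbation(classifier, attacker='deepfool', delta=0.2, max_iter=20)
x_adv = attack.generate(x=x_train)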
"""
This module implements the virtual adversarial attack.
| Paper link: https://arxiv.org/abs/1507.00677
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art import NUMPY_DTYPE
from art.classifiers.classifier import ClassifierNeuralNetwork, ClassifierGradients
from art.attacks.attack import Attack
logger = logging.getLogger(__name__)
class VirtualAdversarialMethod(Attack):
"""
This attack was originally proposed by Miyato et al. (2016) and was used for virtual adversarial training.
| Paper link: https://arxiv.org/abs/1507.00677
"""
attack_params = Attack.attack_params + ['eps', 'finite_diff', 'max_iter', 'batch_size']
def __init__(self, classifier, max_iter=10, finite_diff=1e-6, eps=.1, batch_size=1):
"""
Create a VirtualAdversarialMethod instance.
:param classifier: A trained classifier.
:type classifier: :class:`.Classifier`
:param eps: Attack step (max input variation).
:type eps: `float`
:param finite_diff: The finite difference parameter.
:type finite_diff: `float`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param batch_size: Size of the batch on which adversarial samples are generated.
:type batch_size: `int`
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art import NUMPY_DTYPE
from art.attacks.attack import Attack
from art.utils import compute_success, to_categorical, check_and_transform_label_format
logger = logging.getLogger(__name__)
class HopSkipJump(Attack):
"""
Implementation of the HopSkipJump attack from Chen et al. (2019). This is a powerful black-box attack that
only requires final class predictions, and is an advanced version of the boundary attack.
| Paper link: https://arxiv.org/abs/1904.02144
"""
attack_params = Attack.attack_params + ['targeted', 'norm', 'max_iter', 'max_eval',
'init_eval', 'init_size', 'curr_iter', 'batch_size']
def __init__(self, classifier, targeted=False, norm=2, max_iter=50, max_eval=10000, init_eval=100, init_size=100):
"""
Create a HopSkipJump attack instance.
:param classifier: A trained classifier.
:type classifier: :class:`.Classifier`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param norm: Order of the norm. Possible values: np.inf or 2.
:type norm: `int`
:param max_iter: Maximum number of iterations.
:type max_iter: `int`
:param max_eval: Maximum number of evaluations for estimating gradient.
:type max_eval: `int`
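# --- Illustrative usage sketch, not part of the source above; `classifier` and
# `x_test` are placeholders. Like the boundary attack, HopSkipJump queries only
# predicted labels; with `norm=2` it minimises the L2 size of the perturbation. ---
from art.attacks import HopSkipJump

attack = HopSkipJump(classifier, targeted=False, norm=2, max_iter=50, max_eval=10000)
x_adv = attack.generate(x=x_test)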
import logging
import numpy as np
from art import NUMPY_DTYPE
from art.attacks.attack import Attack
logger = logging.getLogger(__name__)
class MarginAttack(Attack):
"""
Implementation of MarginAttack.
| Paper link: https://openreview.net/pdf?id=B1gHjoRqYQ
"""
attack_params = Attack.attack_params + ['max_iter', 'target_scan_iters', 'final_restore_iters',
'offset', 'metric', 'targeted', 'num_scan_classes', 'restore_lr',
'project_lr_init', 'project_lr_ratio', 'nu', 'verbose']
def __init__(self, classifier, max_iter=200, target_scan_iters=20, final_restore_iters=20, offset=-0.1,
metric='L2', targeted=False, num_scan_classes=None, restore_lr=None, project_lr_init=1.0,
project_lr_ratio=0.1, nu=None, verbose=False):
"""
Create a MarginAttack instance.
:param classifier: A trained model.
:type classifier: :class:`.Classifier`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param target_scan_iters: The number of iterations in which the restoration move scans candidate adversarial classes.
:type target_scan_iters: `int`
:param final_restore_iters: The number of final iterations where projection move is removed
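# --- Illustrative usage sketch, not part of the source above. MarginAttack is
# not in mainline ART, so the import path below is hypothetical; `classifier`,
# `x_test` and `y_test` are placeholders, and the standard `Attack.generate`
# interface is assumed. ---
from art.attacks.margin_attack import MarginAttack  # hypothetical module path

attack = MarginAttack(classifier, max_iter=200, metric='L2', targeted=False)
x_adv = attack.generate(x=x_test, y=y_test)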