import torch
import torch.nn as nn
from ..attack import Attack
class IFGSM(Attack):
"""
I-FGSM attack in the paper 'Adversarial Examples in the Physical World'
[https://arxiv.org/abs/1607.02533]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the paper. (DEFAULT : 4/255)
alpha (float): alpha in the paper. (DEFAULT : 1/255)
iters (int): max iterations. (DEFAULT : 0)
.. note:: If iters is 0, the number of iterations is chosen automatically using the formula in the paper.
"""
def __init__(self, model, eps=4/255, alpha=1/255, iters=0):
super(IFGSM, self).__init__("IFGSM", model)
self.eps = eps
self.alpha = alpha
self.iters = iters
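# Hedged sketch: the snippet above is truncated before its forward pass. The
# standalone function below illustrates the I-FGSM update implied by the
# docstring (repeat x' = clip(x' + alpha*sign(grad_x L)) inside an eps-ball);
# it is an assumption based on the paper, not the library's verbatim code, and
# it reuses the torch/nn imports above.
def _ifgsm_sketch(model, images, labels, eps=4/255, alpha=1/255, iters=0):
    if iters == 0:
        # Formula from the paper, with eps expressed on the 0-255 pixel scale.
        iters = int(min(eps*255 + 4, 1.25*eps*255))
    loss_fn = nn.CrossEntropyLoss()
    ori_images = images.clone().detach()
    adv_images = images.clone().detach()
    for _ in range(iters):
        adv_images.requires_grad = True
        cost = loss_fn(model(adv_images), labels)
        grad = torch.autograd.grad(cost, adv_images)[0]
        adv_images = adv_images.detach() + alpha*grad.sign()
        # Project back into the eps-ball around the originals and into [0, 1].
        delta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)
        adv_images = torch.clamp(ori_images + delta, min=0, max=1).detach()
    return adv_images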
import torch
import torch.nn as nn
from ..attack import Attack
class IterLL(Attack):
"""
Iterative least-likely class attack in the paper 'Adversarial Examples in the Physical World'
[https://arxiv.org/abs/1607.02533]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the paper. (DEFAULT : 4/255)
alpha (float): alpha in the paper. (DEFAULT : 1/255)
iters (int): max iterations. (DEFAULT : 0)
.. note:: If iters is 0, the number of iterations is chosen automatically using the formula in the paper.
"""
def __init__(self, model, eps=4/255, alpha=1/255, iters=0):
super(IterLL, self).__init__("IterLL", model)
self.eps = eps
self.alpha = alpha
self.iters = iters
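# Hedged sketch: the iterative least-likely-class variant targets the class the
# clean model finds least probable and steps *against* the gradient of the loss
# toward that target. This is an assumption based on the paper, not the
# library's verbatim code; it reuses the torch/nn imports above.
def _iterll_sketch(model, images, eps=4/255, alpha=1/255, iters=0):
    if iters == 0:
        iters = int(min(eps*255 + 4, 1.25*eps*255))
    loss_fn = nn.CrossEntropyLoss()
    ori_images = images.clone().detach()
    adv_images = images.clone().detach()
    with torch.no_grad():
        # Least-likely class of the clean prediction becomes the target label.
        target_labels = model(ori_images).argmin(dim=1)
    for _ in range(iters):
        adv_images.requires_grad = True
        cost = loss_fn(model(adv_images), target_labels)
        grad = torch.autograd.grad(cost, adv_images)[0]
        # Gradient *descent* on the target-class loss (note the minus sign).
        adv_images = adv_images.detach() - alpha*grad.sign()
        delta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)
        adv_images = torch.clamp(ori_images + delta, min=0, max=1).detach()
    return adv_images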
import torch
import torch.nn as nn
import torch.optim as optim
from ..attack import Attack
class CW(Attack):
"""
CW(L2) attack in the paper 'Towards Evaluating the Robustness of Neural Networks'
[https://arxiv.org/abs/1608.04644]
Arguments:
model (nn.Module): a model to attack.
targeted (bool): (DEFAULT : False)
True - change the image to be closer to a given (target) label.
False - change the image to be away from the correct label.
c (float): c in the paper. (DEFAULT : 1e-4)
kappa (float): kappa (also written as 'confidence') in the paper. (DEFAULT : 0)
iters (int): max iterations. (DEFAULT : 1000)
lr (float): learning rate of the optimizer. (DEFAULT : 0.01)
.. note:: Several parts of the paper/other implementations are NOT IMPLEMENTED here.
"""
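# Hedged sketch: CW-L2 as described above optimizes a perturbation in tanh
# space with Adam, minimizing ||x' - x||_2^2 + c * f(x'), where f encourages
# misclassification with margin kappa. A minimal standalone version follows;
# it is an assumption based on the paper, not the library's verbatim code,
# and it reuses the torch/nn/optim imports above.
def _cw_l2_sketch(model, images, labels, targeted=False, c=1e-4, kappa=0,
                  iters=1000, lr=0.01):
    images = images.clone().detach()
    # w parameterizes the adversarial image as x' = 0.5*(tanh(w) + 1) in [0, 1].
    w = torch.atanh(torch.clamp(images*2 - 1, min=-1 + 1e-6, max=1 - 1e-6)).detach()
    w.requires_grad = True
    opt = optim.Adam([w], lr=lr)

    def f(x):
        outputs = model(x)
        one_hot = nn.functional.one_hot(labels, num_classes=outputs.shape[1]).float()
        real = (one_hot*outputs).sum(dim=1)                          # logit of the given label
        other = ((1 - one_hot)*outputs - one_hot*1e4).max(dim=1)[0]  # best competing logit
        if targeted:
            return torch.clamp(other - real, min=-kappa)  # pull toward the given label
        return torch.clamp(real - other, min=-kappa)      # push away from the correct label

    for _ in range(iters):
        adv = 0.5*(torch.tanh(w) + 1)
        loss = nn.MSELoss(reduction='sum')(adv, images) + c*f(adv).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()
    return (0.5*(torch.tanh(w) + 1)).detach()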
import torch
import torch.nn as nn
from ..attack import Attack
class FGSM(Attack):
"""
FGSM attack in the paper 'Explaining and harnessing adversarial examples'
[https://arxiv.org/abs/1412.6572]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the paper. (DEFAULT : 0.007)
"""
def __init__(self, model, eps=0.007):
super(FGSM, self).__init__("FGSM", model)
self.eps = eps
def forward(self, images, labels):
images = images.to(self.device)
labels = labels.to(self.device)
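# Hedged sketch: the forward pass above is cut off here. A minimal FGSM step
# consistent with the docstring (x' = clip(x + eps*sign(grad_x L), 0, 1)) could
# continue roughly as follows; this is an assumption, not the library's
# verbatim code, and it reuses the torch/nn imports above.
def _fgsm_sketch(model, images, labels, eps=0.007):
    images = images.clone().detach()
    images.requires_grad = True
    cost = nn.CrossEntropyLoss()(model(images), labels)
    grad = torch.autograd.grad(cost, images)[0]
    adv_images = images + eps*grad.sign()
    return torch.clamp(adv_images, min=0, max=1).detach()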
import torch
import torch.nn as nn
from ..attack import Attack
class PGD(Attack):
"""
PGD attack in the paper 'Towards Deep Learning Models Resistant to Adversarial Attacks'
[https://arxiv.org/abs/1706.06083]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the paper. (DEFAULT : 0.3)
alpha (float): alpha in the paper. (DEFAULT : 2/255)
iters (int): max iterations. (DEFAULT : 40)
"""
def __init__(self, model, eps=0.3, alpha=2/255, iters=40):
super(PGD, self).__init__("PGD", model)
self.eps = eps
self.alpha = alpha
self.iters = iters
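# Hedged sketch: PGD repeats a signed-gradient ascent step followed by
# projection onto the L_inf eps-ball and the valid pixel range. This is an
# assumption based on the paper, not the library's verbatim code (the paper
# also allows a random start inside the eps-ball).
def _pgd_sketch(model, images, labels, eps=0.3, alpha=2/255, iters=40):
    loss_fn = nn.CrossEntropyLoss()
    ori_images = images.clone().detach()
    adv_images = images.clone().detach()
    for _ in range(iters):
        adv_images.requires_grad = True
        cost = loss_fn(model(adv_images), labels)
        grad = torch.autograd.grad(cost, adv_images)[0]
        adv_images = adv_images.detach() + alpha*grad.sign()
        # Projection step: stay within eps of the originals and within [0, 1].
        delta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)
        adv_images = torch.clamp(ori_images + delta, min=0, max=1).detach()
    return adv_images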
import torch
import torch.nn as nn
from ..attack import Attack
class APGD(Attack):
"""
Comment on "Adv-BNN: Improved Adversarial Defense through Robust Bayesian Neural Network"
[https://arxiv.org/abs/1907.00895]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the PGD paper. (DEFAULT : 0.3)
alpha (float): alpha in the PGD paper. (DEFAULT : 2/255)
iters (int): max iterations. (DEFAULT : 40)
sampling (int) : the number of sampled models per step. (DEFAULT : 10)
"""
def __init__(self, model, eps=0.3, alpha=2/255, iters=40, sampling=10):
super(APGD, self).__init__("APGD", model)
self.eps = eps
self.alpha = alpha
self.iters = iters
self.sampling = sampling
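# Hedged sketch: for a stochastic model such as Adv-BNN, each PGD step can
# average the gradient over `sampling` stochastic forward passes (an
# expectation-over-sampling variant of PGD). This is an assumption based on
# the referenced comment paper, not the library's verbatim code.
def _apgd_sketch(model, images, labels, eps=0.3, alpha=2/255, iters=40, sampling=10):
    loss_fn = nn.CrossEntropyLoss()
    ori_images = images.clone().detach()
    adv_images = images.clone().detach()
    for _ in range(iters):
        adv_images.requires_grad = True
        grad = torch.zeros_like(adv_images)
        for _ in range(sampling):
            # Each forward pass may draw new stochastic weights from the model.
            cost = loss_fn(model(adv_images), labels)
            grad = grad + torch.autograd.grad(cost, adv_images)[0]
        adv_images = adv_images.detach() + alpha*(grad/sampling).sign()
        delta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)
        adv_images = torch.clamp(ori_images + delta, min=0, max=1).detach()
    return adv_images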
import torch
import torch.nn as nn
from ..attack import Attack
class RFGSM(Attack):
"""
R+FGSM attack in the paper 'Ensemble Adversarial Training: Attacks and Defenses'
[https://arxiv.org/abs/1705.07204]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the paper. (DEFAULT : 16/255)
alpha (float): alpha in the paper. (DEFAULT : 8/255)
iters (int): max iterations. (DEFAULT : 1)
"""
def __init__(self, model, eps=16/255, alpha=8/255, iters=1):
super(RFGSM, self).__init__("RFGSM", model)
self.eps = eps
self.alpha = alpha
self.iters = iters
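# Hedged sketch: R+FGSM first takes a small random step of size alpha, then an
# FGSM-style step of size (eps - alpha) from the randomly perturbed point.
# An assumption based on the paper, not the library's verbatim code.
def _rfgsm_sketch(model, images, labels, eps=16/255, alpha=8/255, iters=1):
    loss_fn = nn.CrossEntropyLoss()
    # Random initial step using the sign of Gaussian noise.
    adv_images = torch.clamp(images + alpha*torch.randn_like(images).sign(),
                             min=0, max=1).detach()
    for _ in range(iters):
        adv_images.requires_grad = True
        cost = loss_fn(model(adv_images), labels)
        grad = torch.autograd.grad(cost, adv_images)[0]
        adv_images = adv_images.detach() + (eps - alpha)*grad.sign()
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()
    return adv_images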
cost = loss(outputs, labels).to(self.device)
cost.backward()
adv_images = images + self.alpha*images.grad.sign()
# Project the step back into the eps-ball around the original images:
# a = lower bound, max(images - eps, 0)
a = torch.clamp(images - self.eps, min=0)
# b = element-wise max(adv_images, a)
b = (adv_images >= a).float()*adv_images + (a > adv_images).float()*a
# c = element-wise min(b, images + eps)
c = (b > images+self.eps).float()*(images+self.eps) + (images+self.eps >= b).float()*b
# Finally clip to the valid pixel range.
images = torch.clamp(c, max=1).detach_()
adv_images = images
return adv_images
class IterLL(Attacks):
"""
Iterative least-likely class attack in the paper 'Adversarial Examples in the Physical World'
[https://arxiv.org/abs/1607.02533]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the paper. (DEFAULT : 4/255)
alpha (float): alpha in the paper. (DEFAULT : 1/255)
iters (int): max iterations. (DEFAULT : 0)
.. note:: If iters is 0, the number of iterations is chosen automatically using the formula in the paper.
"""
def __init__(self, model, eps=4/255, alpha=1/255, iters=0):
super(IterLL, self).__init__("IterLL", model)
self.eps = eps
self.alpha = alpha
self.iters = iters
optimizer.step()
# Early stop if the total loss stops decreasing (checked every iters//10 steps).
if step % (self.iters//10) == 0:
if cost > prev:
print('CW Attack is stopped due to CONVERGENCE....')
return a
prev = cost
print('- CW Attack Progress : %2.2f %% ' %((step+1)/self.iters*100), end='\r')
# Map w from tanh space back to a valid image in [0, 1].
adv_images = (1/2*(nn.Tanh()(w) + 1)).detach_()
return adv_images
class PGD(Attacks):
"""
PGD attack in the paper 'Towards Deep Learning Models Resistant to Adversarial Attacks'
[https://arxiv.org/abs/1706.06083]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the paper. (DEFAULT : 0.3)
alpha (float): alpha in the paper. (DEFAULT : 2/255)
iters (int): max iterations. (DEFAULT : 40)
"""
def __init__(self, model, eps=0.3, alpha=2/255, iters=40):
super(PGD, self).__init__("PGD", model)
self.eps = eps
self.alpha = alpha
self.iters = iters
outputs = model(adv_images)
# Count correct predictions on the adversarial images.
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.to(self.device)).sum()
print('- Evaluation Progress : %2.2f %% ' %((step+1)/total_batch*100), end='\r')
accuracy = 100 * float(correct) / total
print('\n- Accuracy of model : %f %%' % (accuracy))
return accuracy
'''
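# Hedged sketch of typical usage: given a trained `model`, one of the attack
# classes above, and a torch.utils.data.DataLoader `test_loader`, robust
# accuracy can be measured roughly as below. The names here are illustrative
# assumptions, not part of the library.
def _robust_accuracy_sketch(model, attack, test_loader, device='cpu'):
    model.eval()
    correct, total = 0, 0
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        adv_images = attack(images, labels)   # e.g. attack = PGD(model, eps=0.3)
        with torch.no_grad():
            outputs = model(adv_images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    return 100.0 * correct / total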
class FGSM(Attacks):
"""
FGSM attack in the paper 'Explaining and harnessing adversarial examples'
[https://arxiv.org/abs/1412.6572]
Arguments:
model (nn.Module): a model to attack.
eps (float): epsilon in the paper. (DEFAULT : 0.007)
"""
def __init__(self, model, eps=0.007):
super(FGSM, self).__init__("FGSM", model)
self.eps = eps
def __call__(self, images, labels):
images = images.to(self.device)
labels = labels.to(self.device)
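# Hedged sketch: every class above inherits from ..attack.Attack and defines
# only __init__ plus forward/__call__. A minimal base class consistent with
# that usage might look as follows; this is an assumption about its interface
# (name, model, device handling), not the library's verbatim code.
class _AttackBaseSketch:
    def __init__(self, name, model):
        self.attack_name = name
        self.model = model
        # Reuse the device of the wrapped model's parameters.
        self.device = next(model.parameters()).device

    def forward(self, images, labels):
        raise NotImplementedError

    def __call__(self, images, labels):
        # Generate adversarial images with the model in eval mode.
        self.model.eval()
        return self.forward(images, labels)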