"""Tests for cleverhans.dataset"""
from cleverhans.dataset import Dataset
from cleverhans.devtools.checks import CleverHansTest


class LightweightDataset(Dataset):
    """
    A dataset that does not actually load any data so it is cheap to run
    in tests.
    """


class TestDataset(CleverHansTest):
    """
    Tests for the Dataset class
    """

    def test_factory(self):
        """test_factory: Test that dataset->factory->dataset preserves type"""
        d1 = LightweightDataset()
        factory = d1.get_factory()
        d2 = factory()
        self.assertTrue(type(d1) is type(d2))
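
    # A targeted generate_np call should move more than 90% of the random
    # inputs into their randomly chosen target classes.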
    def test_generate_np_targeted_gives_adversarial_example(self):
        x_val = np.random.rand(10, 1000)
        x_val = np.array(x_val, dtype=np.float32)

        feed_labs = np.zeros((10, 10))
        feed_labs[np.arange(10), np.random.randint(0, 9, 10)] = 1
        x_adv = self.attack.generate_np(x_val,
                                        clip_min=-5., clip_max=5.,
                                        y_target=feed_labs)
        new_labs = np.argmax(self.sess.run(self.model.get_logits(x_adv)), axis=1)
        worked = np.mean(np.argmax(feed_labs, axis=1) == new_labs)
        self.assertTrue(worked > .9)


class TestDeepFool(CleverHansTest):
    def setUp(self):
        super(TestDeepFool, self).setUp()

        self.sess = tf.Session()
        self.model = SimpleModel()
        self.attack = DeepFool(self.model, sess=self.sess)

    def test_generate_np_gives_adversarial_example(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, overshoot=0.02, max_iter=50,
                                        nb_candidate=2, clip_min=-5,
                                        clip_max=5)
        orig_labs = np.argmax(self.sess.run(self.model.get_logits(x_val)), axis=1)

        x_val = np.array(x_val, dtype=np.float32)
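
        # For every decay factor the perturbation should saturate the eps
        # bound: the largest per-example change is expected to be close to eps.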
        for decay_factor in [0.0, 0.5, 1.0]:
            x_adv = self.attack.generate_np(x_val, eps=0.5, ord=np.inf,
                                            decay_factor=decay_factor,
                                            clip_min=-5.0, clip_max=5.0)
            delta = np.max(np.abs(x_adv - x_val), axis=1)
            self.assertClose(delta, 0.5)

    def test_multiple_initial_random_step(self):
        # There is no initial random step, so nothing to test here
        pass


class TestMadryEtAl(CleverHansTest):
    def setUp(self):
        super(TestMadryEtAl, self).setUp()

        self.model = DummyModel('madryetal_dummy_model')
        self.sess = tf.Session()

    def test_attack_can_be_constructed(self):
        # The test passes if this does not raise an exception
        self.attack = MadryEtAl(self.model, sess=self.sess)


class TestBasicIterativeMethod(CleverHansTest):
    def setUp(self):
        super(TestBasicIterativeMethod, self).setUp()

        self.model = DummyModel('bim_dummy_model')
        self.sess = tf.Session()
# pylint: disable=missing-docstring
import unittest
import numpy as np
from cleverhans.devtools.checks import CleverHansTest


class TestMNISTTutorialPytorch(CleverHansTest):
    def test_mnist_tutorial_pytorch(self):
        import tensorflow as tf
        from cleverhans_tutorials import mnist_tutorial_pytorch

        # Run the MNIST tutorial on a dataset of reduced size
        with tf.Graph().as_default():
            np.random.seed(42)
            report = mnist_tutorial_pytorch.mnist_tutorial(
                nb_epochs=2,
                train_end=5000,
                test_end=333,
            )

            # Check accuracy values contained in the AccuracyReport object
            self.assertGreater(report.clean_train_clean_eval, 0.9)
            self.assertLess(report.clean_train_adv_eval, 0.10)


import numpy as np
import tensorflow as tf

from cleverhans import utils_tf
from cleverhans.devtools.checks import CleverHansTest
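

# NumPy reference implementation of the mean KL divergence between the
# softmax distributions defined by two batches of logits.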
def numpy_kl_with_logits(p_logits, q_logits):
    def numpy_softmax(logits):
        logits -= np.max(logits, axis=1, keepdims=True)
        exp_logits = np.exp(logits)
        return exp_logits / np.sum(exp_logits, axis=1, keepdims=True)

    p = numpy_softmax(p_logits)
    log_p = p_logits - np.log(np.sum(np.exp(p_logits), axis=1, keepdims=True))
    log_q = q_logits - np.log(np.sum(np.exp(q_logits), axis=1, keepdims=True))
    return (p * (log_p - log_q)).sum(axis=1).mean()


class TestUtilsTF(CleverHansTest):
    """Test class for utils_tf"""

    def setUp(self):
        super(TestUtilsTF, self).setUp()
        self.sess = tf.Session()

    def test_clip_by_value_numpy_dtype(self):
        # Test that it's possible to use clip_by_value while mixing numpy and tf
        clip_min = np.zeros((1,))
        clip_max = tf.ones((1,))
        x = tf.ones((1,))
        # The point of this test is just to make sure the casting logic doesn't raise an exception
        utils_tf.clip_by_value(x, clip_min, clip_max)


from unittest import SkipTest

import numpy as np
import torch

from cleverhans.devtools.checks import CleverHansTest
from cleverhans.future.torch.attacks.projected_gradient_descent import projected_gradient_descent
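

# A tiny two-layer network with fixed weights, so attack behaviour in these
# tests is cheap to evaluate and not affected by random initialization.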
class SimpleModel(torch.nn.Module):
    def __init__(self):
        super(SimpleModel, self).__init__()
        self.w1 = torch.tensor([[1.5, .3], [-2, .3]])
        self.w2 = torch.tensor([[-2.4, 1.2], [.5, -2.3]])

    def forward(self, x):
        x = torch.matmul(x, self.w1)
        x = torch.sigmoid(x)
        x = torch.matmul(x, self.w2)
        return x
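

# Shared fixtures and helpers for attack test cases; the base class skips
# itself so that only concrete subclasses actually run.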
class CommonAttackProperties(CleverHansTest):
    def setUp(self):
        # pylint: disable=unidiomatic-typecheck
        if type(self) is CommonAttackProperties:
            raise SkipTest()
        super(CommonAttackProperties, self).setUp()
        self.model = SimpleModel()
        self.x = torch.randn(100, 2)
        self.normalized_x = torch.rand(100, 2)  # truncated between [0, 1)
        self.red_ind = list(range(1, len(self.x.size())))
        self.ord_list = [1, 2, np.inf]

    def help_adv_examples_success_rate(self, **kwargs):
        x_adv = self.attack(model_fn=self.model, x=self.normalized_x, **kwargs)
        _, ori_label = self.model(self.normalized_x).max(1)
# pylint: disable=missing-docstring
import unittest
import numpy as np
from cleverhans.devtools.checks import CleverHansTest


class TestMNISTTutorialJSMA(CleverHansTest):
    def test_mnist_tutorial_jsma(self):
        import tensorflow as tf
        from cleverhans_tutorials import mnist_tutorial_jsma

        # Run the MNIST tutorial on a dataset of reduced size
        # and disable visualization.
        jsma_tutorial_args = {'train_start': 0,
                              'train_end': 1000,
                              'test_start': 0,
                              'test_end': 1666,
                              'viz_enabled': False,
                              'source_samples': 1,
                              'nb_epochs': 2}
        g = tf.Graph()
        with g.as_default():
            self.attack.generate(x_val, sanity_checks=False, grad_sparsity=gs)
            self.assertTrue(context.exception)

            # sparsity as 1D array should succeed
            gs = tf.random.uniform(shape=(100,), minval=90, maxval=99)
            x_adv = self.attack.generate(x_val, sanity_checks=False, grad_sparsity=gs)
            self.assertTrue(np.array_equal(x_adv.get_shape().as_list(), [100, 2]))

            # sparsity vector of wrong size should fail
            with self.assertRaises(ValueError) as context:
                gs = tf.random.uniform(shape=(101,), minval=90, maxval=99)
                x_adv = self.attack.generate(x_val, sanity_checks=False, grad_sparsity=gs)
            self.assertTrue(context.exception)


class TestCarliniWagnerL2(CleverHansTest):
    def setUp(self):
        super(TestCarliniWagnerL2, self).setUp()

        self.sess = tf.Session()
        self.model = SimpleModel()
        self.attack = CarliniWagnerL2(self.model, sess=self.sess)
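
    # Untargeted attack on 100 random 2-D points, run with a short
    # optimization schedule (100 iterations, 3 binary search steps).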
    def test_generate_np_untargeted_gives_adversarial_example(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=10)
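
        # A minimal Attack subclass whose generate() returns its input
        # unchanged; it only exists to exercise the generate_np machinery.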
        class DummyAttack(Attack):
            def generate(self, x, **kwargs):
                return x

        # Test that generate_np is NOT permitted without a session.
        # The session still needs to be created prior to running the attack.
        # TODO: does anyone know why we need to make an unused session and put it in a with statement?
        with tf.Session():
            attack = DummyAttack(model, sess=None)
            with self.assertRaises(Exception) as context:
                attack.generate_np(0.)
            self.assertTrue(context.exception)


class TestParseParams(CleverHansTest):
    def test_parse(self):
        sess = tf.Session()

        test_attack = Attack(Model('model', 10, {}), sess=sess)
        self.assertTrue(test_attack.parse_params({}))


class TestVirtualAdversarialMethod(CleverHansTest):
    def setUp(self):
        super(TestVirtualAdversarialMethod, self).setUp()

        self.sess = tf.Session()
        self.sess.as_default()
        self.model = DummyModel('virtual_adv_dummy_model')
        self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)
"""Tests for cleverhans.serial"""
import numpy as np
import tensorflow as tf
from cleverhans.devtools.checks import CleverHansTest
from cleverhans.serial import PicklableVariable
from cleverhans.serial import load
from cleverhans.serial import save


class TestSerial(CleverHansTest):
    """
    Tests for cleverhans.serial
    """

    def test_save_and_load_var(self):
        """test_save_and_load_var: Test that we can save and load a
        PicklableVariable with joblib
        """
        sess = tf.Session()
        with sess.as_default():
            x = np.ones(1)
            xv = PicklableVariable(x)
            xv.var.initializer.run()
            save("/tmp/var.joblib", xv)
            sess.run(tf.assign(xv.var, np.ones(1) * 2))
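            # The live variable now holds 2s, but the file written above still
            # contains the value that was saved (1s), so a subsequent load()
            # should see the saved value rather than the update.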
            new_xv = load("/tmp/var.joblib")