# Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
def test_logit_layer_name_is_logits(self):
    """The wrapper should report layer 'l2' as the logits layer."""
    wrapper = KerasModelWrapper(self.model)
    self.assertEqual(wrapper._get_logits_name(), 'l2')
# --- Adversarial-training fragment ----------------------------------------
# NOTE(review): this span is an excerpt from a larger script; the original
# indentation has been lost, and names such as `testing`, `model`, `x_train`,
# `y_train`, `batch_size`, `report`, `sess`, `fgsm_params` and the helper
# functions must come from the enclosing (unseen) scope.
# Calculate training error
if testing:
_, train_acc, train_adv_acc = model.evaluate(x_train, y_train,
batch_size=batch_size,
verbose=0)
# Record clean and adversarial training accuracy on the shared report object.
report.train_clean_train_clean_eval = train_acc
report.train_clean_train_adv_eval = train_adv_acc
print("Repeating the process, using adversarial training")
# Redefine Keras model
model_2 = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
# Calling the model on its own input builds the forward graph once.
model_2(model_2.input)
wrap_2 = KerasModelWrapper(model_2)
fgsm_2 = FastGradientMethod(wrap_2, sess=sess)
# Use a loss function based on legitimate and adversarial examples
adv_loss_2 = get_adversarial_loss(model_2, fgsm_2, fgsm_params)
adv_acc_metric_2 = get_adversarial_acc_metric(model_2, fgsm_2, fgsm_params)
model_2.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=adv_loss_2,
metrics=['accuracy', adv_acc_metric_2]
)
# Train an MNIST model
# NOTE(review): the fit(...) call below is truncated — its argument list is
# never closed within this excerpt.
model_2.fit(x_train, y_train,
batch_size=batch_size,
epochs=nb_epochs,
validation_data=(x_test, y_test),
#}
# NOTE(review): the block below is commented-out PGD training code, kept as-is.
#wrap = KerasModelWrapper(self.model)
#pgd = ProjectedGradientDescent(wrap, sess=self.sess, nb_iter=20)
#pgd_params = {'eps': self.eps}
##attack = pgd.generate(x, y=y, **pgd_params)
#def attack(x):
# return pgd.generate(x, **pgd_params)
#loss = CrossEntropy(wrap, smoothing=0.1, attack=attack)
#def evaluate():
# #print("XDDD %f", self.sess.run(loss))
# print('Test accuracy on legitimate examples: %0.4f' % self.score(X, y))
#train(self.sess, loss, X.astype(np.float32), Y.astype(np.float32),
# args=train_params, evaluate=evaluate)
######################################
# --- Adversarial fine-tuning of self.model --------------------------------
# NOTE(review): excerpt from a larger method; indentation lost. `y`,
# `self.lbl_enc`, `self.model`, `self.sess`, `self.eps`, `X` and the helper
# functions come from the enclosing (unseen) scope.
# One-hot encode the labels with the already-fitted label encoder.
Y = self.lbl_enc.transform(y.reshape(-1, 1))
wrap_2 = KerasModelWrapper(self.model)
# NOTE(review): the variable is named fgsm_2 but actually holds a PGD attack.
fgsm_2 = ProjectedGradientDescent(wrap_2, sess=self.sess)
# Calling the model on its own input builds the forward graph once.
self.model(self.model.input)
fgsm_params = {'eps': self.eps}
# Use a loss function based on legitimate and adversarial examples
adv_loss_2 = get_adversarial_loss(self.model, fgsm_2, fgsm_params)
adv_acc_metric_2 = get_adversarial_acc_metric(self.model, fgsm_2, fgsm_params)
self.model.compile(
#optimizer=keras.optimizers.Adam(self.learning_rate),
optimizer=keras.optimizers.Nadam(),
loss=adv_loss_2,
metrics=['accuracy', adv_acc_metric_2]
)
# NOTE(review): the fit(...) call below is truncated within this excerpt —
# its argument list is never closed.
self.model.fit(X, Y,
batch_size=self.batch_size,
epochs=self.epochs,
# --- Black-box / substitute-model evaluation fragment ----------------------
# NOTE(review): excerpt; `sess`, `x`, `y`, `preds_sub`, `X_test`, `Y_test`,
# `bbox_preds`, `model_sub`, `attack`, `batch_size` and `accuracies` come
# from the enclosing (unseen) scope; indentation has been lost.
# Evaluate the substitute model on clean test examples
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds_sub, X_test, Y_test, args=eval_params)
accuracies['sub'] = acc
print('substitution model accuracy:', acc)
# Find the correctly predicted labels
original_predict = batch_eval(sess, [x], [bbox_preds], [X_test],
args=eval_params)[0]
original_class = np.argmax(original_predict, axis = 1)
true_class = np.argmax(Y_test, axis = 1)
# Boolean mask of the test points the black-box model classifies correctly.
mask = true_class == original_class
print(np.sum(mask), "out of", mask.size, "are correct labeled,", len(X_test[mask]))
# Initialize the Fast Gradient Sign Method (FGSM) attack object.
wrap = KerasModelWrapper(model_sub)
# Craft adversarial examples using the substitute
eval_params = {'batch_size': batch_size}
if attack == "fgsm":
attacker_params = {'eps': 0.4, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
fgsm = FastGradientMethod(wrap, sess=sess)
x_adv_sub = fgsm.generate(x, **attacker_params)
attacker = fgsm
adv_inputs = X_test
ori_labels = Y_test
print("Running FGSM attack...")
else:
# Non-FGSM path: Carlini-Wagner L2; its setup presumably continues past
# this excerpt — TODO confirm.
print("Running Carlini and Wagner\'s L2 attack...")
yname = "y"
def generate(self, x_val, **kwargs):
    """
    Generate adversarial samples and return them in a Numpy array.

    :param x_val: input samples to perturb.
    :param y_val: (passed via kwargs) target labels when ``self.targeted``
        is true, otherwise the original class labels; when omitted, the
        model's own predictions on ``x_val`` are used as the labels.
    :return: A Numpy array holding the adversarial examples.
    """
    # Split the attack-specific parameters from the (optional) labels.
    attack_params = dict(kwargs)
    y_val = attack_params.pop('y_val', None)
    # NOTE(review): `assert` is stripped under -O, so a set_params failure
    # would then pass silently — consider an explicit check.
    assert self.set_params(**attack_params)
    wrapped_model = KerasModelWrapper(self.classifier.model)
    cw_attack = CWL2(self.sess, wrapped_model, self.batch_size, self.confidence,
                     self.targeted, self.learning_rate, self.binary_search_steps,
                     self.max_iterations, self.abort_early, self.initial_const,
                     self.clip_min, self.clip_max,
                     self.classifier.model.output_shape[1], x_val.shape[1:])
    if y_val is None:
        # No labels provided: use the model's predicted classes instead.
        x_ph = tf.placeholder(dtype=tf.float32,
                              shape=self.classifier.model.get_input_shape_at(0))
        predicted = self.sess.run(tf.argmax(self.classifier.model(x_ph), axis=1),
                                  {x_ph: x_val})
        y_val = to_categorical(predicted,
                               self.classifier.model.get_output_shape_at(-1)[-1])
    return cw_attack.attack(x_val, y_val)
# --- Checkpointed-training fragment ----------------------------------------
# NOTE(review): excerpt; `nb_epochs`, `batch_size`, `learning_rate`,
# `train_dir`, `filename`, `sess`, `model`, `evaluate`, `load_model`,
# `label_smoothing`, `x_train`, `y_train` and `testing` come from the
# enclosing (unseen) scope; indentation has been lost.
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'train_dir': train_dir,
'filename': filename
}
# Fixed seed so training runs are reproducible.
rng = np.random.RandomState([2017, 8, 30])
if not os.path.exists(train_dir):
os.mkdir(train_dir)
# Reuse an existing checkpoint from train_dir when one is available.
ckpt = tf.train.get_checkpoint_state(train_dir)
print(train_dir, ckpt)
ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
wrap = KerasModelWrapper(model)
if load_model and ckpt_path:
saver = tf.train.Saver()
print(ckpt_path)
saver.restore(sess, ckpt_path)
print("Model loaded from: {}".format(ckpt_path))
evaluate()
else:
print("Model was not loaded, training from scratch.")
loss = CrossEntropy(wrap, smoothing=label_smoothing)
train(sess, loss, x_train, y_train, evaluate=evaluate,
args=train_params, rng=rng)
# Calculate training error
# NOTE(review): this trailing `if testing:` block is truncated in the excerpt.
if testing:
eval_params = {'batch_size': batch_size}
:param Y_train: the training labels for the oracle
:param X_test: the testing data for the oracle
:param Y_test: the testing labels for the oracle
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param rng: numpy.random.RandomState
:return:
"""
# NOTE(review): the lines above are the tail of a docstring whose opening
# quotes (and the enclosing def) lie before this excerpt; indentation lost.
# Define Keras-based TF model graph (for the black-box model)
nb_filters = 64
model = cnn_model(nb_filters=nb_filters, nb_classes=nb_classes)
# Wrap the model in KerasModelWrapper
model = KerasModelWrapper(model, nb_classes)
loss = LossCrossEntropy(model, smoothing=0.1)
predictions = model.get_logits(x)
print("Defined TensorFlow model graph.")
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate
}
train(sess, loss, x, y, X_train, Y_train, args=train_params, rng=rng)
# Print out the accuracy on legitimate data
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, predictions, X_test, Y_test,
args=eval_params)
def _get_pert(self, X, Y, eps:float, model, ord):
    """
    Compute PGD perturbations (adv_x - x) for a batch of inputs.

    :param X: input batch; assumes shape (batch,) + self.n_features — TODO confirm.
    :param Y: one-hot labels of shape (batch, self.n_classes).
    :param eps: total perturbation budget for PGD.
    :param model: Keras model to attack.
    :param ord: norm for the attack (e.g. np.inf or 2).
    :return: numpy array of perturbations, same shape as X.
    """
    inputs = tf.placeholder(tf.float32, shape=([None] + list(self.n_features)))
    labels = tf.placeholder(tf.float32, shape=(None, self.n_classes))
    attacker = ProjectedGradientDescent(KerasModelWrapper(model), sess=self.sess)
    pgd_kwargs = {'y': labels, 'eps': eps, 'ord': ord}
    if eps < 0.05:
        # Small budgets get a proportionally small per-iteration step size;
        # larger budgets rely on the attack's default eps_iter.
        pgd_kwargs['eps_iter'] = eps / 5
    adv = tf.stop_gradient(attacker.generate(inputs, **pgd_kwargs))
    delta = adv - inputs
    return delta.eval(feed_dict={inputs: X, labels: Y}, session=self.sess)
def __init__(self, model):
"""
Create a wrapper for a Keras model.

:param model: a Keras model instance to wrap; must not be None.
:raises ValueError: if ``model`` is None.
"""
# NOTE(review): the parent constructor is called with (None, None, {});
# its parameter meanings are not visible in this excerpt — TODO confirm.
super(KerasModelWrapper, self).__init__(None, None, {})
if model is None:
raise ValueError('model argument must be supplied.')
# Keep a reference to the wrapped model.
self.model = model
# Lazily-populated Keras-side handle; set elsewhere (not in this excerpt).
self.keras_model = None
# NOTE(review): this method may continue past the end of the excerpt.