How to use the cleverhans.compat.reduce_sum function in cleverhans

To help you get started, we've selected a few cleverhans examples showing how reduce_sum is used in public projects.

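Under the hood, cleverhans.compat.reduce_sum is a thin compatibility wrapper around tf.reduce_sum that smooths over the keep_dims/keepdims argument rename across TensorFlow releases. Here is a minimal, self-contained sketch (assuming TensorFlow 1.x and cleverhans 3.x; the tensor values are made up):

import tensorflow as tf
from cleverhans.compat import reduce_sum

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
# Per-example sum over the feature axis, keeping the batch dimension.
row_sums = reduce_sum(x, axis=1, keepdims=True)

with tf.Session() as sess:
  print(sess.run(row_sums))  # [[6.], [15.]]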

github tensorflow / cleverhans / cleverhans / attacks / saliency_map_method.py (View on Github)
    # Create a 2D tensor of scores for each pair of candidate features
    scores = tf.cast(scores_mask, tf_dtype) \
        * (-target_sum * other_sum) * zero_diagonal

    # Extract the best two pixels
    best = tf.argmax(
        tf.reshape(scores, shape=[-1, nb_features * nb_features]), axis=1)

    p1 = tf.mod(best, nb_features)
    p2 = tf.floordiv(best, nb_features)
    p1_one_hot = tf.one_hot(p1, depth=nb_features)
    p2_one_hot = tf.one_hot(p2, depth=nb_features)

    # Check if more modification is needed for each sample
    mod_not_done = tf.equal(reduce_sum(y_in * preds_onehot, axis=1), 0)
    cond = mod_not_done & (reduce_sum(domain_in, axis=1) >= 2)

    # Update the search domain
    cond_float = tf.reshape(tf.cast(cond, tf_dtype), shape=[-1, 1])
    to_mod = (p1_one_hot + p2_one_hot) * cond_float

    domain_out = domain_in - to_mod

    # Apply the modification to the images
    to_mod_reshape = tf.reshape(
        to_mod, shape=([-1] + x_in.shape[1:].as_list()))
    if increase:
      x_out = tf.minimum(clip_max, x_in + to_mod_reshape * theta)
    else:
      x_out = tf.maximum(clip_min, x_in - to_mod_reshape * theta)

    # Increase the iterator, and check if all misclassifications are done
    i_out = tf.add(i_in, 1)
    cond_out = reduce_any(cond)

    return x_out, y_in, domain_out, i_out, cond_out
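In the snippet above, reduce_sum implements a per-example dot product between one-hot rows: the sum is 1 when the current prediction already matches the target and 0 otherwise. A standalone sketch of that test with made-up tensors (assuming TensorFlow 1.x):

import tensorflow as tf
from cleverhans.compat import reduce_sum

y_in = tf.constant([[0., 1.], [1., 0.]])          # target labels, one-hot
preds_onehot = tf.constant([[0., 1.], [0., 1.]])  # current predictions, one-hot
# Sum of the elementwise product is 1 on a match, 0 otherwise.
mod_not_done = tf.equal(reduce_sum(y_in * preds_onehot, axis=1), 0)

with tf.Session() as sess:
  print(sess.run(mod_not_done))  # [False  True]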
github tensorflow / cleverhans / cleverhans / attacks / spsa.py (View on Github)
def margin_logit_loss(model_logits, label, nb_classes=10, num_classes=None):
  """Computes difference between logit for `label` and next highest logit.

  The loss is high when `label` is unlikely (targeted by default).
  This follows the same interface as `loss_fn` for TensorOptimizer and
  projected_optimization, i.e. it returns a batch of loss values.
  """
  if num_classes is not None:
    warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
                  " `num_classes` may be removed on or after 2019-04-23.")
    nb_classes = num_classes
    del num_classes
  if 'int' in str(label.dtype):
    logit_mask = tf.one_hot(label, depth=nb_classes, axis=-1)
  else:
    logit_mask = label
  if 'int' in str(logit_mask.dtype):
    logit_mask = tf.to_float(logit_mask)
  try:
    label_logits = reduce_sum(logit_mask * model_logits, axis=-1)
  except TypeError:
    raise TypeError("Could not take row-wise dot product between "
                    "logit mask, of dtype " + str(logit_mask.dtype)
                    + " and model_logits, of dtype "
                    + str(model_logits.dtype))
  logits_with_target_label_neg_inf = model_logits - logit_mask * 99999
  highest_nonlabel_logits = reduce_max(
      logits_with_target_label_neg_inf, axis=-1)
  loss = highest_nonlabel_logits - label_logits
  return loss
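A hypothetical usage sketch of this margin loss on concrete values (assuming TensorFlow 1.x; the logits and label are made up): reduce_sum extracts the logit at the label index, reduce_max finds the runner-up, and their difference is the loss.

import tensorflow as tf
from cleverhans.compat import reduce_sum, reduce_max

model_logits = tf.constant([[2.0, 5.0, 1.0]])
label = tf.constant([1])
logit_mask = tf.one_hot(label, depth=3, axis=-1)

label_logits = reduce_sum(logit_mask * model_logits, axis=-1)              # 5.0
highest_nonlabel = reduce_max(model_logits - logit_mask * 99999, axis=-1)  # 2.0
loss = highest_nonlabel - label_logits

with tf.Session() as sess:
  print(sess.run(loss))  # [-3.]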
github tensorflow / cleverhans / cleverhans / attacks / elastic_net_method.py (View on Github)
    self.assign_slack = self.assign_newimg
    self.assign_slack += tf.multiply(self.zt,
                                     self.assign_newimg - self.newimg)

    # --------------------------------
    self.setter = tf.assign(self.newimg, self.assign_newimg)
    self.setter_y = tf.assign(self.slack, self.assign_slack)

    # prediction BEFORE-SOFTMAX of the model
    self.output = model.get_logits(self.newimg)
    self.output_y = model.get_logits(self.slack)

    # distance to the input data
    self.l2dist = reduce_sum(tf.square(self.newimg-self.timg),
                             list(range(1, len(shape))))
    self.l2dist_y = reduce_sum(tf.square(self.slack-self.timg),
                               list(range(1, len(shape))))
    self.l1dist = reduce_sum(tf.abs(self.newimg-self.timg),
                             list(range(1, len(shape))))
    self.l1dist_y = reduce_sum(tf.abs(self.slack-self.timg),
                               list(range(1, len(shape))))
    self.elasticdist = self.l2dist + tf.multiply(self.l1dist,
                                                 self.beta_t)
    self.elasticdist_y = self.l2dist_y + tf.multiply(self.l1dist_y,
                                                     self.beta_t)
    if self.decision_rule == 'EN':
      self.crit = self.elasticdist
      self.crit_p = 'Elastic'
    else:
      self.crit = self.l1dist
      self.crit_p = 'L1'

    # compute the probability of the label class versus the maximum other
    real = reduce_sum((self.tlab) * self.output, 1)
    real_y = reduce_sum((self.tlab) * self.output_y, 1)
    other = reduce_max((1 - self.tlab) * self.output -
                       (self.tlab * 10000), 1)
    other_y = reduce_max((1 - self.tlab) * self.output_y -
                         (self.tlab * 10000), 1)

    if self.TARGETED:
      # if targeted, optimize for making the other class most likely
      loss1 = tf.maximum(ZERO(), other - real + self.CONFIDENCE)
      loss1_y = tf.maximum(ZERO(), other_y - real_y + self.CONFIDENCE)
    else:
      # if untargeted, optimize for making this class least likely.
      loss1 = tf.maximum(ZERO(), real - other + self.CONFIDENCE)
      loss1_y = tf.maximum(ZERO(), real_y - other_y + self.CONFIDENCE)

    # sum up the losses
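The excerpt breaks off where the per-term losses are summed. The reduce_sum pattern it relies on collapses every axis except the batch axis, yielding one distance per example; a minimal sketch with made-up image shapes (assuming TensorFlow 1.x):

import tensorflow as tf
from cleverhans.compat import reduce_sum

shape = [2, 28, 28, 1]  # batch of two grayscale images
newimg = tf.zeros(shape)
timg = tf.ones(shape)
beta = 1e-2             # hypothetical L1 weight

# Reduce over axes 1..3, leaving one scalar distance per example.
l2dist = reduce_sum(tf.square(newimg - timg), list(range(1, len(shape))))
l1dist = reduce_sum(tf.abs(newimg - timg), list(range(1, len(shape))))
elasticdist = l2dist + beta * l1dist

with tf.Session() as sess:
  print(sess.run(elasticdist))  # [791.84 791.84]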
github tensorflow / cleverhans / cleverhans / utils_tf.py (View on Github)
def kl_with_logits(p_logits, q_logits, scope=None,
                   loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES):
  """Helper function to compute kl-divergence KL(p || q)
  """
  with tf.name_scope(scope, "kl_divergence") as name:
    p = tf.nn.softmax(p_logits)
    p_log = tf.nn.log_softmax(p_logits)
    q_log = tf.nn.log_softmax(q_logits)
    loss = reduce_mean(reduce_sum(p * (p_log - q_log), axis=1),
                       name=name)
    tf.losses.add_loss(loss, loss_collection)
    return loss
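A hypothetical sketch of the same computation on concrete logits (assuming TensorFlow 1.x): reduce_sum collapses the class axis of p * (log p - log q), giving one KL value per example before the mean.

import tensorflow as tf
from cleverhans.compat import reduce_sum

p_logits = tf.constant([[1.0, 2.0, 3.0]])
q_logits = tf.constant([[3.0, 2.0, 1.0]])

p = tf.nn.softmax(p_logits)
kl = tf.reduce_mean(reduce_sum(p * (tf.nn.log_softmax(p_logits)
                                    - tf.nn.log_softmax(q_logits)), axis=1))

with tf.Session() as sess:
  print(sess.run(kl))  # ~1.15; non-negative, 0 iff p == q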
github tensorflow / cleverhans / cleverhans / attacks / carlini_wagner_l2.py (View on Github)
    self.assign_tlab = tf.placeholder(
        tf_dtype, (batch_size, num_labels), name='assign_tlab')
    self.assign_const = tf.placeholder(
        tf_dtype, [batch_size], name='assign_const')

    # the resulting instance, tanh'd to keep bounded from clip_min
    # to clip_max
    self.newimg = (tf.tanh(modifier + self.timg) + 1) / 2
    self.newimg = self.newimg * (clip_max - clip_min) + clip_min

    # prediction BEFORE-SOFTMAX of the model
    self.output = model.get_logits(self.newimg)

    # distance to the input data
    self.other = (tf.tanh(self.timg) + 1) / \
        2 * (clip_max - clip_min) + clip_min
    self.l2dist = reduce_sum(
        tf.square(self.newimg - self.other), list(range(1, len(shape))))

    # compute the probability of the label class versus the maximum other
    real = reduce_sum((self.tlab) * self.output, 1)
    other = reduce_max((1 - self.tlab) * self.output - self.tlab * 10000,
                       1)

    if self.TARGETED:
      # if targeted, optimize for making the other class most likely
      loss1 = tf.maximum(ZERO(), other - real + self.CONFIDENCE)
    else:
      # if untargeted, optimize for making this class least likely.
      loss1 = tf.maximum(ZERO(), real - other + self.CONFIDENCE)

    # sum up the losses
    self.loss2 = reduce_sum(self.l2dist)
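Note the two different reductions above: the first reduce_sum collapses every non-batch axis into a per-example L2 distance, and the second sums that vector into the scalar loss term. A minimal sketch with made-up values (assuming TensorFlow 1.x):

import tensorflow as tf
from cleverhans.compat import reduce_sum

newimg = tf.constant([[0.1, 0.2], [0.3, 0.4]])
other = tf.constant([[0.0, 0.0], [0.0, 0.0]])

l2dist = reduce_sum(tf.square(newimg - other), 1)  # one distance per example
loss2 = reduce_sum(l2dist)                         # scalar over the batch

with tf.Session() as sess:
  print(sess.run([l2dist, loss2]))  # [0.05, 0.25], 0.3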
github tensorflow / cleverhans / cleverhans / utils_tf.py (View on Github)
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
  """
  Helper function to normalize a batch of vectors.
  :param x: the input placeholder
  :param epsilon: stabilizes division
  :return: the batch of l2 normalized vector
  """
  with tf.name_scope(scope, "l2_batch_normalize") as name_scope:
    x_shape = tf.shape(x)
    x = tf.contrib.layers.flatten(x)
    x /= (epsilon + reduce_max(tf.abs(x), 1, keepdims=True))
    square_sum = reduce_sum(tf.square(x), 1, keepdims=True)
    x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
    x_norm = tf.multiply(x, x_inv_norm)
    return tf.reshape(x_norm, x_shape, name_scope)
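A hypothetical usage sketch (assuming TensorFlow 1.x, where tf.contrib is available): each row of the result has approximately unit L2 norm, with epsilon guarding the division for all-zero rows.

import tensorflow as tf
from cleverhans.utils_tf import l2_batch_normalize

x = tf.constant([[3.0, 4.0],
                 [0.0, 5.0]])
x_norm = l2_batch_normalize(x)

with tf.Session() as sess:
  print(sess.run(x_norm))  # ~[[0.6, 0.8], [0., 1.]]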
github tensorflow / cleverhans / cleverhans / attacks / fast_gradient_method.py (View on Github)
    # The following line should not change the numerical results.
    # It applies only because `optimal_perturbation` is the output of
    # a `sign` op, which has zero derivative anyway.
    # It should not be applied for the other norms, where the
    # perturbation has a non-zero derivative.
    optimal_perturbation = tf.stop_gradient(optimal_perturbation)
  elif ord == 1:
    abs_grad = tf.abs(grad)
    sign = tf.sign(grad)
    max_abs_grad = tf.reduce_max(abs_grad, red_ind, keepdims=True)
    tied_for_max = tf.to_float(tf.equal(abs_grad, max_abs_grad))
    num_ties = tf.reduce_sum(tied_for_max, red_ind, keepdims=True)
    optimal_perturbation = sign * tied_for_max / num_ties
  elif ord == 2:
    square = tf.maximum(avoid_zero_div,
                        reduce_sum(tf.square(grad),
                                   reduction_indices=red_ind,
                                   keepdims=True))
    optimal_perturbation = grad / tf.sqrt(square)
  else:
    raise NotImplementedError("Only L-inf, L1 and L2 norms are "
                              "currently implemented.")

  # Scale perturbation to be the solution for the norm=eps rather than
  # norm=1 problem
  scaled_perturbation = utils_tf.mul(eps, optimal_perturbation)
  return scaled_perturbation
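For the ord=2 branch, reduce_sum computes the per-example squared norm of the gradient, so the perturbation has unit L2 norm before the final scaling by eps. A standalone sketch with a made-up gradient (assuming TensorFlow 1.x):

import tensorflow as tf
from cleverhans.compat import reduce_sum

grad = tf.constant([[3.0, 4.0]])
avoid_zero_div = 1e-12

square = tf.maximum(avoid_zero_div,
                    reduce_sum(tf.square(grad), axis=1, keepdims=True))
optimal_perturbation = grad / tf.sqrt(square)  # unit L2 norm per example

with tf.Session() as sess:
  print(sess.run(optimal_perturbation))  # [[0.6, 0.8]]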
github tensorflow / cleverhans / cleverhans / attacks / fast_gradient_method.py (View on Github)
  if clip_min is not None:
    asserts.append(utils_tf.assert_greater_equal(
        x, tf.cast(clip_min, x.dtype)))

  if clip_max is not None:
    asserts.append(utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))

  # Make sure the caller has not passed probs by accident
  assert logits.op.type != 'Softmax'

  if y is None:
    # Using model predictions as ground truth to avoid label leaking
    preds_max = reduce_max(logits, 1, keepdims=True)
    y = tf.to_float(tf.equal(logits, preds_max))
    y = tf.stop_gradient(y)
  y = y / reduce_sum(y, 1, keepdims=True)

  # Compute loss
  loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
  if targeted:
    loss = -loss

  # Define gradient of loss wrt input
  grad, = tf.gradients(loss, x)

  optimal_perturbation = optimize_linear(grad, eps, ord)

  # Add perturbation to original example to obtain adversarial example
  adv_x = x + optimal_perturbation

  # If clipping is needed, reset all values outside of [clip_min, clip_max]
  if (clip_min is not None) or (clip_max is not None):
    # We don't currently support one-sided clipping
    assert clip_min is not None and clip_max is not None
    adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)
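The division by reduce_sum above keeps the inferred labels normalized even when several logits tie for the maximum; a hypothetical sketch with a deliberate tie (assuming TensorFlow 1.x):

import tensorflow as tf
from cleverhans.compat import reduce_sum, reduce_max

logits = tf.constant([[1.0, 3.0, 3.0]])  # two logits tie for the maximum
preds_max = reduce_max(logits, 1, keepdims=True)
y = tf.to_float(tf.equal(logits, preds_max))
y = tf.stop_gradient(y)
y = y / reduce_sum(y, 1, keepdims=True)  # rows still sum to 1

with tf.Session() as sess:
  print(sess.run(y))  # [[0., 0.5, 0.5]]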