How to use the tensorflow.train module in TensorFlow

To help you get started, we've selected a few tf.train examples, based on popular ways the module is used in public projects.

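Nearly every example below follows the same graph-mode pattern: build a loss tensor, construct an optimizer from tf.train, and call minimize() to obtain a training op. Here is a minimal, self-contained sketch of that pattern (a hedged illustration assuming a TensorFlow 1.x environment, where tf.train still exposes these graph-mode optimizers):

import numpy as np
import tensorflow as tf  # TensorFlow 1.x

# Toy linear regression: fit y = w * x + b.
x = tf.placeholder(tf.float32, shape=[None], name='x')
y = tf.placeholder(tf.float32, shape=[None], name='y')
w = tf.Variable(0.0, name='w')
b = tf.Variable(0.0, name='b')
loss = tf.reduce_mean(tf.square(y - (w * x + b)), name='loss')

# Every tf.train optimizer is used the same way: construct it, then minimize.
train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    xs = np.random.uniform(-1, 1, size=100).astype(np.float32)
    ys = 3.0 * xs + 0.5
    for _ in range(1000):
        sess.run(train_op, feed_dict={x: xs, y: ys})
    print(sess.run([w, b]))  # should approach [3.0, 0.5]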

github rlworkgroup / garage / tests / benchmarks / garage / tf / policies / test_benchmark_continuous_mlp_policy.py
            size_in_transitions=params['replay_buffer_size'],
            time_horizon=params['n_rollout_steps'])

        ddpg = DDPG(env_spec=env.spec,
                    policy=policy,
                    qf=qf,
                    replay_buffer=replay_buffer,
                    steps_per_epoch=params['steps_per_epoch'],
                    policy_lr=params['policy_lr'],
                    qf_lr=params['qf_lr'],
                    target_update_tau=params['tau'],
                    n_train_steps=params['n_train_steps'],
                    discount=params['discount'],
                    min_buffer_size=int(1e4),
                    exploration_strategy=action_noise,
                    policy_optimizer=tf.train.AdamOptimizer,
                    qf_optimizer=tf.train.AdamOptimizer)

        # Set up logger since we are not using run_experiment
        tabular_log_file = osp.join(log_dir, 'progress.csv')
        dowel_logger.add_output(dowel.StdOutput())
        dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))
        dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))

        runner.setup(ddpg, env, sampler_args=dict(n_envs=12))
        runner.train(n_epochs=params['n_epochs'],
                     batch_size=params['n_rollout_steps'])

        dowel_logger.remove_all()

        return tabular_log_file
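
Note that this garage benchmark passes tf.train.AdamOptimizer itself, i.e. the class rather than an instance, as policy_optimizer and qf_optimizer, so the algorithm can construct the optimizers internally with its own policy_lr and qf_lr. A hedged sketch of that class-as-argument pattern (the helper name build_train_op is invented here for illustration):

import tensorflow as tf  # TensorFlow 1.x

def build_train_op(loss, learning_rate, optimizer_cls=tf.train.AdamOptimizer):
    # Instantiate whichever tf.train optimizer class the caller chose.
    optimizer = optimizer_cls(learning_rate=learning_rate)
    return optimizer.minimize(loss)

# The caller picks the optimizer family without pre-binding a learning rate:
# train_op = build_train_op(loss, 1e-3, optimizer_cls=tf.train.RMSPropOptimizer)
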
github tensorflow / cleverhans / cleverhans / experimental / certification / optimization.py
        self.dual_object.get_psd_product(self.stopped_eig_vec_estimate))
    # Penalizing negative of min eigen value because we want min eig value
    # to be positive
    self.total_objective = (
        self.dual_object.unconstrained_objective
        + 0.5 * tf.square(
            tf.maximum(-self.penalty_placeholder * self.eig_val_estimate, 0)))
    global_step = tf.Variable(0, trainable=False)
    # Set up learning rate as a placeholder
    self.learning_rate = tf.placeholder(tf.float32, shape=[])

    # Set up the optimizer
    if self.params['optimizer'] == 'adam':
      self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    elif self.params['optimizer'] == 'adagrad':
      self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
    elif self.params['optimizer'] == 'momentum':
      self.optimizer = tf.train.MomentumOptimizer(
          learning_rate=self.learning_rate,
          momentum=self.params['momentum_parameter'],
          use_nesterov=True)
    else:
      self.optimizer = tf.train.GradientDescentOptimizer(
          learning_rate=self.learning_rate)

    # Write out the projection step
    self.train_step = self.optimizer.minimize(
        self.total_objective, global_step=global_step)

    self.sess.run(tf.global_variables_initializer())

    # Projecting the dual variables
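
The snippet above selects among tf.train optimizers from a config string. The same dispatch can be written as a small factory function; this is a sketch under that assumption (make_optimizer is not part of cleverhans):

import tensorflow as tf  # TensorFlow 1.x

def make_optimizer(name, learning_rate, momentum=0.9):
    # Map a config string onto a tf.train optimizer instance.
    if name == 'adam':
        return tf.train.AdamOptimizer(learning_rate=learning_rate)
    if name == 'adagrad':
        return tf.train.AdagradOptimizer(learning_rate=learning_rate)
    if name == 'momentum':
        return tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                          momentum=momentum,
                                          use_nesterov=True)
    # Fall back to plain SGD, mirroring the else branch above.
    return tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
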
github daylen / audio-style-classifier / train.py
def train():
    header, train, val, test, data_dict = get_data(N_CLASSES, MERGE_TAGS, SPLIT_RANDOMLY)
    print(header)

    weights, biases = get_vars()

    coord = tf.train.Coordinator()

    data_man_train, pred, cost, auc_op, update_auc_op = get_end_ops(train, data_dict, coord, weights, biases)
    data_man_val, pred_val, cost_val, auc_op_val, update_auc_op_val = get_end_ops(val, data_dict, coord, weights, biases)

    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost)

    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    sess.run(tf.initialize_local_variables())

    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    data_man_train.start_threads(sess)
    data_man_val.start_threads(sess)

    if not os.path.exists(SAVE_DIR):
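
This training loop leans on tf.train.Saver for checkpointing and on tf.train.Coordinator with tf.train.start_queue_runners for the (now-deprecated) queue-based input pipeline. A minimal sketch of the Saver save/restore round trip (the checkpoint directory is a placeholder):

import os
import tensorflow as tf  # TensorFlow 1.x

counter = tf.Variable(0, name='counter')
increment = tf.assign_add(counter, 1)
saver = tf.train.Saver()  # covers all global variables by default

ckpt_dir = '/tmp/tf_train_demo'  # placeholder directory
os.makedirs(ckpt_dir, exist_ok=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(increment)
    ckpt_path = saver.save(sess, os.path.join(ckpt_dir, 'model.ckpt'))

with tf.Session() as sess:
    saver.restore(sess, ckpt_path)  # no initializer needed after restore
    print(sess.run(counter))        # prints 1
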
github leemengtaiwan / cat-recognition-train / train.py
    "adadelta": lambda lr, arg1, arg2, arg3=None: tf.train.AdadeltaOptimizer(lr, arg1, arg2),
    "adagrad": lambda lr, arg1, arg2=None, arg3=None: tf.train.AdagradOptimizer(lr, arg1),
github IsaacChanghau / AmusingPythonCodes / selu_activation_visualization / trainer.py
        if config.lr_weight_decay:
            self.learning_rate = tf.train.exponential_decay(self.learning_rate, global_step=self.global_step,
                                                            decay_steps=10000, decay_rate=0.5, staircase=True,
                                                            name='decaying_learning_rate')
        self.check_op = tf.no_op()
        self.optimizer = tf.contrib.layers.optimize_loss(loss=self.model.loss, global_step=self.global_step,
                                                         learning_rate=self.learning_rate,
                                                         optimizer=tf.train.AdamOptimizer, clip_gradients=20.0,
                                                         name='optimizer_loss')

        self.summary_op = tf.summary.merge_all()
        self.plot_summary_op = tf.summary.merge_all(key='plot_summaries')
        self.saver = tf.train.Saver(max_to_keep=100)
        self.summary_writer = tf.summary.FileWriter(self.train_dir)
        self.checkpoint_secs = 600  # 10 min
        self.supervisor = tf.train.Supervisor(logdir=self.train_dir, is_chief=True, saver=None, summary_op=None,
                                              summary_writer=self.summary_writer, save_summaries_secs=300,
                                              save_model_secs=self.checkpoint_secs, global_step=self.global_step)

        # intra_op_parallelism_threads=1, inter_op_parallelism_threads=1
        session_config = tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True),
                                        device_count={'GPU': 1})
        self.session = self.supervisor.prepare_or_wait_for_session(config=session_config)
        self.ckpt_path = config.checkpoint
        if self.ckpt_path is not None:
            log.info("Checkpoint path: %s", self.ckpt_path)
            self.pretrain_saver.restore(self.session, self.ckpt_path)
            log.info("Loaded the pretrain parameters from the provided checkpoint path")
github tensorflow / tensorflow / tensorflow / contrib / model_pruning / examples / cifar10 / cifar10_pruning.py
  grads = opt.compute_gradients(total_loss)

  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.summary.histogram(var.op.name, var)

  # Add histograms for gradients.
  for grad, var in grads:
    if grad is not None:
      tf.summary.histogram(var.op.name + '/gradients', grad)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,
                                                        global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')

  return train_op
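
Here apply_gradients is paired with tf.train.ExponentialMovingAverage so evaluation can use smoothed copies of the weights. A reduced sketch of that bookkeeping (0.999 stands in for MOVING_AVERAGE_DECAY):

import tensorflow as tf  # TensorFlow 1.x

global_step = tf.train.get_or_create_global_step()
variable_averages = tf.train.ExponentialMovingAverage(0.999, global_step)

# apply() creates one shadow variable per tracked variable and returns the op
# that updates them; it is meant to run alongside the gradient step.
variables_averages_op = variable_averages.apply(tf.trainable_variables())

# with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
#     train_op = tf.no_op(name='train')
#
# At eval time, variable_averages.variables_to_restore() maps the checkpointed
# shadow values back onto the model variables.
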
github Showmax / kinetics-downloader / sound_to_tfrecords.py
def int64_feature(value):
  """
  Get an int64 feature Tensor.
  :param value:
  :return:
  """
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
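
Feature helpers like int64_feature are normally assembled into a tf.train.Example proto before being written to a TFRecord file. A short sketch of that round trip (the output path and payload are placeholders):

import tensorflow as tf  # TensorFlow 1.x

def int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

example = tf.train.Example(features=tf.train.Features(feature={
    'label': int64_feature(3),
    'audio_raw': bytes_feature(b'\x00\x01\x02'),  # placeholder payload
}))

with tf.python_io.TFRecordWriter('/tmp/sample.tfrecord') as writer:  # placeholder path
    writer.write(example.SerializeToString())
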
github dhruvramani / Neural-Architecture-Search-with-RL / model_conv / src / config.py
def __init__(self, t_args):
                self.learning_rate = t_args.lr
                self.dropout = t_args.dropout
                if t_args.opt.lower() not in ["adam", "rmsprop", "sgd", "normal"]: 
                    raise ValueError('Undefined type of optimizer')
                else:  
                    self.optimizer = {"adam": tf.train.AdamOptimizer,
                                      "rmsprop": tf.train.RMSPropOptimizer,
                                      "sgd": tf.train.GradientDescentOptimizer,
                                      "normal": tf.train.Optimizer}[t_args.opt.lower()](self.learning_rate)
github yashkant / Padam-Tensorflow / resnet-18 / additional experiments / run_p_exp.py
}

hp = hyperparameters[dataset]
epochs = hp['epoch']
batch_size = hp['batch_size']

img_rows, img_cols = 32, 32
train_size = trainX.shape[0]

trainX = trainX.astype('float32')
trainX = trainX/255
testX = testX.astype('float32')
testX = testX/255
trainY = kutils.to_categorical(trainY)
testY = kutils.to_categorical(testY)
tf.train.create_global_step()
# (trainX, trainY), (testX, testY) = (trainX[:100], trainY[:100]), (testX[:100], testY[:100])

datagen_train = ImageDataGenerator(preprocessing_function=preprocess,horizontal_flip=True)
datagen_test = ImageDataGenerator(preprocessing_function=normalize)

optim_array = ['padam']
p_values = [0.25, 0.125, 0.0625]

history = {}

for i in range(1, 3):
    if(i != 0):
        continue_training = True # Flag to continue training   
        continue_epoch = (i)*30
    else:
        continue_training = False
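
tf.train.create_global_step(), called once in the script above, registers a global_step variable in the default graph so that later code (and any optimizer handed the step) can find it. A brief sketch of the create/retrieve pair:

import tensorflow as tf  # TensorFlow 1.x

tf.train.create_global_step()             # raises if a global step already exists
global_step = tf.train.get_global_step()  # retrievable anywhere afterwards

# Optimizers increment it when it is passed to minimize()/apply_gradients():
# train_op = optimizer.minimize(loss, global_step=global_step)
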
github rasbt / python-machine-learning-book-2nd-edition / code / ch14 / ch14.py
            stddev=0.25),
        name='weight')
    bias = tf.Variable(0.0, name='bias')
    
    ## build the model
    y_hat = tf.add(weight * tf_x, bias, 
                   name='y_hat')
    print(y_hat)
    
    ## compute the cost
    cost = tf.reduce_mean(tf.square(tf_y - y_hat), 
                          name='cost')
    print(cost)
    
    ## train
    optim = tf.train.GradientDescentOptimizer(
        learning_rate=0.001)
    train_op = optim.minimize(cost, name='train_op')




## create a random toy dataset for regression


np.random.seed(0)

def make_random_data():
    x = np.random.uniform(low=-2, high=4, size=200)
    y = []
    for t in x:
        r = np.random.normal(loc=0.0,