How to use the tensorflow.float32 dtype in TensorFlow

To help you get started, we’ve selected a few tensorflow.float32 examples, based on popular ways it is used in public projects.
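Before the project snippets, a quick orientation: tensorflow.float32 is a dtype object rather than a callable; you pass it wherever an op, variable, or cast accepts a dtype argument. A minimal sketch in TF 1.x graph style to match the examples below (the names x, w, logits, and mask are purely illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4])            # float32 input placeholder
w = tf.get_variable('w', shape=[4, 2], dtype=tf.float32)   # float32 weight variable
logits = tf.matmul(x, w)                                   # float32 matmul output
mask = tf.cast(logits > 0.0, tf.float32)                   # bool comparison cast back to float32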


github Simon4Yan / Learning-via-Translation / SPGAN / test_spgan.py
""" param """
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset', dest='dataset', default='market2duke', help='which dataset to use')
parser.add_argument('--crop_size', dest='crop_size', type=int, default=256, help='then crop to this size')
args = parser.parse_args()

dataset = args.dataset
crop_size = args.crop_size


""" run """
with tf.Session() as sess:
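    # Image batches enter the graph as float32 NHWC tensors: unknown batch
    # size, crop_size x crop_size spatial dims, 3 (RGB) channels.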
    a_real = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 3])
    b_real = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 3])

    a2b = models.generator(a_real, 'a2b')
    b2a = models.generator(b_real, 'b2a')
    b2a2b = models.generator(b2a, 'a2b', reuse=True)
    a2b2a = models.generator(a2b, 'b2a', reuse=True)

    # -- restore -- #
    saver = tf.train.Saver()
    ckpt_path = utils.load_checkpoint('./checkpoints/' + dataset + '_spgan', sess, saver)
    if ckpt_path is None:
        raise Exception('No checkpoint!')
    saver.restore(sess, ckpt_path)
    print('Copy variables from %s' % ckpt_path)
github wblgers / tensorflow_stacked_denoising_autoencoder / AE_Softmax_MNIST.py
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, n_outputs])

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
train_step = tf.train.AdamOptimizer(learning_rate=lr).minimize(cross_entropy)

## define the output layer using softmax in the fine-tuning step
x_ae = ae.transform()
y_ft = tf.matmul(x_ae, W) + b

cross_entropy_ft = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_ft, labels=y_))
train_step_ft = tf.train.AdamOptimizer(learning_rate=lr).minimize(cross_entropy_ft)

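# Accuracy: compare argmax predictions against the labels, cast the boolean
# matches to float32, and take their mean.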
correct_prediction_ft = tf.equal(tf.argmax(y_ft, 1), tf.argmax(y_, 1))
accuracy_ft = tf.reduce_mean(tf.cast(correct_prediction_ft, tf.float32))

## Initialization
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs, _ = mnist.train.next_batch(batch_size)
        # Fit training using batch data
        temp = ae.partial_fit()
        cost, opt = sess.run(temp, feed_dict={ae.x: batch_xs, ae.keep_prob: ae.in_keep_prob})

        # Compute average loss
github ARM-software / ML-KWS-for-MCU / models.py
model_settings['spectrogram_length']]
    with the features corresponding to the same time slot arranged contiguously,
    and the oldest slot at index [:, 0], and newest at [:, -1].
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.
    runtime_settings: Dictionary of information about the runtime.

  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.

  Raises:
      ValueError: If the inputs tensor is incorrectly shaped.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')

  input_frequency_size = model_settings['dct_coefficient_count']
  input_time_size = model_settings['spectrogram_length']

  # Validation.
  input_shape = fingerprint_input.get_shape()
  if len(input_shape) != 2:
    raise ValueError('Inputs to `SVDF` should have rank == 2.')
  if input_shape[-1].value is None:
    raise ValueError('The last dimension of the inputs to `SVDF` '
                     'should be defined. Found `None`.')
  if input_shape[-1].value % input_frequency_size != 0:
    raise ValueError('Inputs feature dimension %d must be a multiple of '
                     'frame size %d' % (fingerprint_input.shape[-1].value,
                                        input_frequency_size))
github rlworkgroup / garage / src / garage / tf / models / lstm.py
cell_init_var (tf.Tensor): Initial cell state.

    """
    with tf.compat.v1.variable_scope(name):
        hidden_dim = lstm_cell.units
        output, [hidden,
                 cell] = lstm_cell(step_input_var,
                                   states=(step_hidden_var, step_cell_var))
        output = output_nonlinearity_layer(output)

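        # Learnable float32 initial hidden and cell states (optionally
        # trainable), broadcast across the batch dimension below.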
        hidden_init_var = tf.compat.v1.get_variable(
            name='initial_hidden',
            shape=(hidden_dim, ),
            initializer=hidden_state_init,
            trainable=hidden_state_init_trainable,
            dtype=tf.float32)
        cell_init_var = tf.compat.v1.get_variable(
            name='initial_cell',
            shape=(hidden_dim, ),
            initializer=cell_state_init,
            trainable=cell_state_init_trainable,
            dtype=tf.float32)

        hidden_init_var_b = tf.broadcast_to(
            hidden_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])
        cell_init_var_b = tf.broadcast_to(
            cell_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])

        def step(hcprev, x):
            hprev = hcprev[:, :hidden_dim]
            cprev = hcprev[:, hidden_dim:]
            h, c = lstm_cell(x, states=(hprev, cprev))[1]
github google / youtube-8m / model_utils.py
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
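  # Scale uniform samples in [0, 1) by each example's frame count: num_frames
  # is cast to float32 for the multiply, then the result is cast back to
  # int32 to serve as frame indices.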
  frame_index = tf.cast(
      tf.multiply(tf.random_uniform([batch_size, num_samples]),
                  tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])),
      tf.int32)
  batch_index = tf.tile(tf.expand_dims(tf.range(batch_size), 1),
                        [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
github NVIDIA / OpenSeq2Seq / example_configs / text2speech / wavenet_float.py
"eval_steps": 500,
  "save_checkpoint_steps": 2500,
  "logdir": "result/wavenet-LJ-float",

  "optimizer": "Adam",
  "optimizer_params": {},
  "lr_policy": exp_decay,
  "lr_policy_params": {
    "learning_rate": 1e-3,
    "decay_steps": 20000,
    "decay_rate": 0.1,
    "use_staircase_decay": False,
    "begin_decay_at": 45000,
    "min_lr": 1e-5,
  },
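  # Build and train the model entirely in float32 precision (this is the
  # "float" variant of the WaveNet config).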
  "dtype": tf.float32,
  "regularizer": tf.contrib.layers.l2_regularizer,
  "regularizer_params": {
    "scale": 1e-6
  },
  "initializer": tf.contrib.layers.xavier_initializer,

  "summaries": [],

  "encoder": WavenetEncoder,
  "encoder_params": {
    "layer_type": "conv1d",
    "kernel_size": 3,
    "strides": 1,
    "padding": "VALID",
    "blocks": 3,
    "layers_per_block": 10,
github brightmart / text_classification / a09_DynamicMemoryNet / a8_train.py
    count_not_exist = 0
    for i in range(1, vocab_size):  # loop over each word
        word = vocabulary_index2word[i]  # get a word
        embedding = None
        try:
            embedding = word2vec_dict[word]  # try to get its vector: an array.
        except Exception:
            embedding = None
        if embedding is not None:  # the word has a pre-trained embedding
            word_embedding_2dlist[i] = embedding  # assign the array to this word.
            count_exist = count_exist + 1
        else:  # no embedding for this word
            word_embedding_2dlist[i] = np.random.uniform(-bound, bound, FLAGS.embed_size)  # init a random vector.
            count_not_exist = count_not_exist + 1
    word_embedding_final = np.array(word_embedding_2dlist)  # convert to a 2d array.
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to a float32 tensor
    t_assign_embedding = tf.assign(model.Embedding, word_embedding)  # assign it to the model's embedding variable.
    sess.run(t_assign_embedding)
    print("words with a pre-trained embedding:", count_exist, "; words without:", count_not_exist)
    print("using pre-trained word embedding. ended...")
github RedHenLab / multi-modal-emotion-prediction / LSTM.py
def read_from_tfrecord(filenames):
    """
    Reads and reshapes binary files from IEMOCAP data.
    """
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)
    tfrecord_features = tf.parse_single_example(tfrecord_serialized,
                        features={
                            'audio_features'    : tf.FixedLenFeature([],tf.string),
                            'sentence_len'      : tf.FixedLenFeature([],tf.string),
                            'word_embeddings'   : tf.FixedLenFeature([],tf.string),
                            'y'                 : tf.FixedLenFeature([],tf.string),
                            'label'             : tf.FixedLenFeature([],tf.string),
                                    },  name='tf_features')
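    # Each feature was serialized as raw bytes; decode back to float32/int32
    # tensors and restore the static shapes.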
    audio_features = tf.decode_raw(tfrecord_features['audio_features'], tf.float32)
    audio_features = tf.reshape(audio_features, (N_FEATURES, LEN_WORD, LEN_SENTENCE))
    audio_features.set_shape((N_FEATURES, LEN_WORD, LEN_SENTENCE))

    word_embeddings = tf.decode_raw(tfrecord_features['word_embeddings'], tf.float32)
    word_embeddings = tf.reshape(word_embeddings, (EMBEDDING_SIZE, LEN_SENTENCE))
    word_embeddings.set_shape((EMBEDDING_SIZE, LEN_SENTENCE))

    y = tf.decode_raw(tfrecord_features['y'], tf.float32)
    y.set_shape(Y_SHAPE)

    label = tf.decode_raw(tfrecord_features['label'], tf.int32)
    label.set_shape((1,))

    sentence_len = tf.decode_raw(tfrecord_features['sentence_len'], tf.int32)
    sentence_len.set_shape((1,))
github google-research / uda / image / randaugment / custom_ops.py
num_units_in = inputs.shape[1]
    weights_shape = [num_units_in, num_units_out]
    unif_init_range = 1.0 / (num_units_out)**(0.5)
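    # Uniform init in [-1/sqrt(num_units_out), +1/sqrt(num_units_out)]; the
    # weights and biases below are created as trainable float32 variables.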
    weights_initializer = tf.random_uniform_initializer(
        -unif_init_range, unif_init_range)
    weights = variable(
        name='weights',
        shape=weights_shape,
        dtype=tf.float32,
        initializer=weights_initializer,
        trainable=True)
    bias_initializer = tf.constant_initializer(0.0)
    biases = variable(
        name='biases',
        shape=[num_units_out,],
        dtype=tf.float32,
        initializer=bias_initializer,
        trainable=True)
    outputs = tf.nn.xw_plus_b(inputs, weights, biases)
    return outputs
github cair / deep-rts / code / experiments / experiment_1 / playground / q_learning / Agent.py
def setup_summary(self):
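        # Track each episode statistic in a scalar tf.Variable; float32
        # placeholders feed new values in through the assign ops below.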
        episode_total_reward = tf.Variable(0.)
        tf.summary.scalar('DeepRTS/Total_Reward/Episode', episode_total_reward)
        episode_avg_max_q = tf.Variable(0.)
        tf.summary.scalar('DeepRTS/Average_Max_Q/Episode', episode_avg_max_q)
        episode_duration = tf.Variable(0.)
        tf.summary.scalar('DeepRTS/Duration/Episode', episode_duration)
        episode_avg_loss = tf.Variable(0.)
        tf.summary.scalar('DeepRTS/Average_Loss/Episode', episode_avg_loss)
        summary_vars = [episode_total_reward, episode_avg_max_q, episode_duration, episode_avg_loss]
        summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]
        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
        summary_op = tf.summary.merge_all()
        return summary_placeholders, update_ops, summary_op