How to use the ludwig.models.modules.initializer_modules.get_initializer function in ludwig

To help you get started, we’ve selected a few ludwig examples based on popular ways this function is used in public projects.

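Before the snippets, here is a minimal sketch of calling the function directly. It assumes the TF1-era ludwig API that these examples target (the module path in the title), and shows the two argument forms that appear in the snippets below: a string name and a dict spec.

import tensorflow as tf
from ludwig.models.modules.initializer_modules import get_initializer

# By name: returns a callable initializer object.
he_init = get_initializer('he_uniform')

# By dict spec, as in embedding_modules.py below: 'type' selects the
# initializer and the remaining keys are its parameters.
uniform_init = get_initializer(
    {'type': 'uniform', 'minval': -1.0, 'maxval': 1.0})

# Calling the object with a shape produces initial values that can be
# handed to tf.compat.v1.get_variable.
weights = tf.compat.v1.get_variable(
    'weights',
    initializer=he_init([256, 128]))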

uber/ludwig: ludwig/models/modules/convolutional_modules.py
def conv_layer(inputs, kernel_shape, biases_shape,
               stride=1, padding='SAME', activation='relu', norm=None,
               dropout=False, dropout_rate=None, regularizer=None,
               initializer=None,
               dimensions=2, is_training=True):
    if initializer is not None:
        initializer_obj = get_initializer(initializer)
        weights = tf.compat.v1.get_variable(
            'weights',
            initializer=initializer_obj(kernel_shape),
            regularizer=regularizer
        )
    else:
        if activation == 'relu':
            initializer = get_initializer('he_uniform')
        elif activation == 'sigmoid' or activation == 'tanh':
            initializer = get_initializer('glorot_uniform')
        # if initializer is None, TensorFlow seems to use
        # a glorot uniform initializer
        weights = tf.compat.v1.get_variable(
            'weights',
            kernel_shape,
            regularizer=regularizer,
            initializer=initializer
        )
    logger.debug('  conv_weights: {0}'.format(weights))

    biases = tf.compat.v1.get_variable('biases', biases_shape,
                             initializer=tf.constant_initializer(0.01))
    logger.debug('  conv_biases: {0}'.format(biases))
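Note how the two branches above use the result of get_initializer differently: with an explicit initializer, the returned object is called with a shape to produce concrete initial values; in the activation-based fallback (he_uniform for relu, glorot_uniform for sigmoid/tanh), the object itself is passed as the initializer argument alongside an explicit shape. A sketch of the two styles, with a made-up kernel shape:

init = get_initializer('he_uniform')

# Style 1: call the object with a shape, pass the resulting values.
w1 = tf.compat.v1.get_variable('w1', initializer=init([3, 3, 3, 32]))

# Style 2: pass the object itself plus an explicit shape.
w2 = tf.compat.v1.get_variable('w2', [3, 3, 3, 32], initializer=init)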
uber/ludwig: ludwig/features/vector_feature.py
def vector_predictions(
            self,
            hidden,
            hidden_size,
            regularizer=None,
    ):
        with tf.compat.v1.variable_scope('predictions_{}'.format(self.name)):
            initializer_obj = get_initializer(self.initializer)
            weights = tf.compat.v1.get_variable(
                'weights',
                initializer=initializer_obj([hidden_size, self.vector_size]),
                regularizer=regularizer
            )
            logger.debug('  projection_weights: {0}'.format(weights))

            biases = tf.compat.v1.get_variable(
                'biases',
                [self.vector_size]
            )
            logger.debug('  projection_biases: {0}'.format(biases))

            logits = tf.matmul(hidden, weights) + biases
            logger.debug('  logits: {0}'.format(logits))
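For orientation, a shape walk-through of the projection built above, with made-up sizes (batch 32, hidden_size 256, vector_size 10):

batch, hidden_size, vector_size = 32, 256, 10
hidden = tf.zeros([batch, hidden_size])
weights = tf.zeros([hidden_size, vector_size])  # initializer_obj([hidden_size, vector_size])
biases = tf.zeros([vector_size])
logits = tf.matmul(hidden, weights) + biases    # shape [batch, vector_size]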
uber/ludwig: ludwig/models/modules/embedding_modules.py
            if initializer is not None:
                initializer_obj_ref = get_initializer(initializer)
            else:
                initializer_obj_ref = get_initializer(
                    {'type': 'uniform', 'minval': -1.0, 'maxval': 1.0})
            initializer_obj = initializer_obj_ref([vocab_size, embedding_size])

        embeddings = tf.compat.v1.get_variable('embeddings',
                                     initializer=initializer_obj,
                                     trainable=embeddings_trainable,
                                     regularizer=regularizer)

    elif representation == 'sparse':
        embedding_size = vocab_size
        embeddings = tf.compat.v1.get_variable('embeddings',
                                     initializer=get_initializer('identity')(
                                         [vocab_size, embedding_size]),
                                     trainable=False)

    else:
        raise Exception(
            'Embedding representation {} not supported.'.format(representation))

    return embeddings, embedding_size
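In the 'sparse' branch, get_initializer('identity') builds a fixed one-hot lookup table: called with a square shape it yields an identity matrix, so row i is the one-hot vector for vocabulary entry i. A tiny sketch, assuming a four-symbol vocabulary:

one_hot = get_initializer('identity')([4, 4])  # 4x4 identity matrix
embeddings = tf.compat.v1.get_variable(
    'embeddings', initializer=one_hot, trainable=False)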
uber/ludwig: ludwig/models/modules/fully_connected_modules.py
def fc_layer(inputs, in_count, out_count,
             activation='relu', norm=None,
             is_training=True, weights=None, biases=None,
             dropout=False, dropout_rate=None,
             initializer=None, regularizer=None):
    if weights is None:
        if initializer is not None:
            initializer_obj = get_initializer(initializer)
            weights = tf.compat.v1.get_variable(
                'weights',
                initializer=initializer_obj([in_count, out_count]),
                regularizer=regularizer
            )
        else:
            if activation == 'relu':
                initializer = get_initializer('he_uniform')
            elif activation == 'sigmoid' or activation == 'tanh':
                initializer = get_initializer('glorot_uniform')
            # if initializer is None, TensorFlow seems to use
            # a glorot uniform initializer
            weights = tf.compat.v1.get_variable(
                'weights',
                [in_count, out_count],
                regularizer=regularizer,
                initializer=initializer
            )

    logger.debug('  fc_weights: {}'.format(weights))

    if biases is None:
        biases = tf.compat.v1.get_variable('biases', [out_count],
                                 initializer=tf.constant_initializer(0.01))
    logger.debug('  fc_biases: {}'.format(biases))
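Finally, a hypothetical call to the fc_layer defined above. The scope name and layer sizes are made up, and the assumption that fc_layer returns the layer's output tensor is not shown in the snippet (it is truncated before the return):

with tf.compat.v1.variable_scope('fc_1'):
    hidden = fc_layer(
        inputs,  # assumed [batch, 128] float tensor
        in_count=128,
        out_count=64,
        activation='relu',
        initializer={'type': 'uniform', 'minval': -0.05, 'maxval': 0.05})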