How to use the lab.B.concat function in lab

To help you get started, we’ve selected a few lab examples based on popular ways lab.B.concat is used in public projects.

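Before the project snippets, here is a minimal, self-contained sketch of the call pattern (assuming lab is installed; the variable names are illustrative, not from the projects below). B.concat takes the tensors to join as positional arguments plus an axis keyword, and dispatches to whichever backend the inputs come from (NumPy, TensorFlow, PyTorch, ...).

import numpy as np
import lab as B

x = np.ones((2, 3))
y = np.zeros((2, 3))

# Positional tensors plus an `axis` keyword, mirroring the calls below.
rows = B.concat(x, y, axis=0)  # shape (4, 3)
cols = B.concat(x, y, axis=1)  # shape (2, 6)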

github wesselb / stheno / stheno / matrix.py
def add(a, b):
    shape_a, shape_b, dtype = B.shape(a.middle), B.shape(b.middle), B.dtype(a)
    # The middle of the sum is block diagonal in the two middles.
    middle = B.concat2d([a.middle, B.zeros(dtype, shape_a[0], shape_b[1])],
                        [B.zeros(dtype, shape_b[0], shape_a[1]), b.middle])
    # Concatenate the left and right factors column-wise.
    return LowRank(left=B.concat(a.left, b.left, axis=1),
                   right=B.concat(a.right, b.right, axis=1),
                   middle=middle)
github wesselb / stheno / stheno / kernel.py
with tf.GradientTape() as t:
    # Get the numbers of inputs.
    nx = B.shape(x)[0]
    ny = B.shape(y)[0]

    # Copy the input `ny` times to efficiently compute many derivatives.
    xis = tf.identity_n([x[:, i:i + 1]] * ny)
    t.watch(xis)

    # Tile inputs for batched computation.
    x = B.tile(x, ny, 1)
    y = B.reshape(B.tile(y, 1, nx), ny * nx, -1)

    # Insert tracked dimension, which is different for every tile.
    xi = B.concat(*xis, axis=0)
    x = B.concat(x[:, :i], xi, x[:, i + 1:], axis=1)

    # Perform the derivative computation.
    out = B.dense(k_elwise(x, y))
    grads = t.gradient(out, xis, unconnected_gradients='zero')
    return B.concat(*grads, axis=1)
github wesselb / stheno / stheno / kernel.py
def _dky_elwise(x, y):
    import tensorflow as tf

    with tf.GradientTape() as t:
        yi = y[:, i:i + 1]
        t.watch(yi)
        # Reinsert the tracked column so the gradient flows through it.
        y = B.concat(y[:, :i], yi, y[:, i + 1:], axis=1)
        out = B.dense(k_elwise(x, y))
        return t.gradient(out, yi, unconnected_gradients='zero')
github wesselb / stheno / stheno / momean.py
def __call__(self, x):
    # Evaluate the mean for every input set and stack the results row-wise.
    return B.concat(*[self(xi) for xi in x.get()], axis=0)
github wesselb / stheno / stheno / kernel.py
with tf.GradientTape() as t:
    # Get the numbers of inputs.
    nx = B.shape(x)[0]
    ny = B.shape(y)[0]

    # Copy the input `nx` times to efficiently compute many derivatives.
    yis = tf.identity_n([y[:, i:i + 1]] * nx)
    t.watch(yis)

    # Tile inputs for batched computation.
    x = B.reshape(B.tile(x, 1, ny), nx * ny, -1)
    y = B.tile(y, nx, 1)

    # Insert tracked dimension, which is different for every tile.
    yi = B.concat(*yis, axis=0)
    y = B.concat(y[:, :i], yi, y[:, i + 1:], axis=1)

    # Perform the derivative computation.
    out = B.dense(k_elwise(x, y))
    grads = t.gradient(out, yis, unconnected_gradients='zero')
    return B.transpose(B.concat(*grads, axis=1))
github wesselb / stheno / stheno / matrix.py
    # Get shape of `a`.
    a_rows, a_cols = B.shape(a)

    # If `a` is square, don't do complicated things.
    if a_rows == a_cols and a_rows is not None:
        return B.diag(a)[:, None] * dense(b)

    # Compute the core part.
    rows = B.minimum(a_rows, B.shape(b)[0])
    core = B.diag(a)[:rows, None] * dense(b)[:rows, :]

    # Compute extra zeros to be appended.
    extra_rows = a_rows - rows
    extra_zeros = B.zeros(B.dtype(b), extra_rows, B.shape(b)[1])
    return B.concat(core, extra_zeros, axis=0)
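
A pattern that recurs in the snippets above is unpacking a Python list into the call, e.g. B.concat(*grads, axis=1), because B.concat takes the tensors positionally rather than as a single list. A small illustrative sketch of that pattern (the names are ours, not from the repository):

import numpy as np
import lab as B

# Stack a variable number of column vectors into one matrix, as the
# gradient code above does with B.concat(*grads, axis=1).
columns = [np.full((3, 1), float(i)) for i in range(4)]
stacked = B.concat(*columns, axis=1)  # shape (3, 4)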