How to use the cgt.floatX attribute in cgt

To help you get started, we've selected a few cgt examples based on popular ways cgt.floatX is used in public projects.

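Despite often being listed as a function, cgt.floatX is a module-level dtype string (for example 'f4' for single precision) that cgt and its examples pass wherever numpy or cgt expects a dtype, so a whole model can follow one global precision setting. A minimal sketch of the idea:

import cgt
import numpy as np

print(cgt.floatX)                    # a numpy-style dtype string such as 'f4'
x = np.arange(6, dtype=cgt.floatX)   # usable anywhere a dtype is accepted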

github joschu / cgt / cgt / api.py
def zeros(shape, dtype=None): #pylint: disable=W0621
    """
    Like numpy.zeros
    """
    if dtype is None:
        dtype = cgt.floatX
    return core.Result(core.Fill(np.array(0, dtype)), shape)
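A hedged usage sketch: with no dtype argument, the symbolic zeros inherit cgt.floatX, so changing the global precision changes every tensor built this way.

import cgt

z = cgt.zeros((3, 4))                 # dtype defaults to cgt.floatX
z64 = cgt.zeros((3, 4), dtype='f8')   # or override it explicitly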
github joschu / cgt / examples / bench / seq_model.py
elapsed = []
horizons = 2**np.arange(2, 10)

for horizon in horizons:
    print "HORIZON",horizon
    tstart = time()

    batch_size = 6
    dim_x = 16
    mem_size = 10

    X_tnk = cgt.tensor3("X")

    cell = gru.GRUCell([dim_x], mem_size)

    # initial memory state for the GRU, created in cgt.floatX
    Minit_nk = cgt.zeros((X_tnk.shape[0], X_tnk.shape[1]), cgt.floatX)
    M = Minit_nk

    # unroll the recurrence over the full horizon
    for t in xrange(horizon):
        M = cell(M, X_tnk[t])

    # cgt.print_tree(M)
    print "simplifying..."
    M_simp = cgt.simplify([M])
    print "done"
    # cgt.print_tree(M_simp)
    print "fn before:",cgt.count_nodes(M)
    print "fn after:",cgt.count_nodes(M_simp)

    gs = cgt.grad(cgt.sum(M), cell.params())
    print "grad before", cgt.count_nodes(gs)
    g_simp = cgt.simplify(gs)
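The snippet cuts off before the timing step; a hedged sketch of how the loop body plausibly finishes, reusing only names declared above (the exact code in the repository may differ):

    print "grad after", cgt.count_nodes(g_simp)
    elapsed.append(time() - tstart)   # fills the elapsed list declared before the loop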
github joschu / cgt / examples / cgt_theano_feedforward_comparison.py
# ================================================================
# Main script
# ================================================================


if __name__ == "__main__":

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--unittest", action="store_true")
    args = parser.parse_args()

    # Load data
    # -----------------------
    mnist = fetch_dataset("http://rll.berkeley.edu/cgt-data/mnist.npz")
    Xdata = (mnist["X"]/255.).astype(cgt.floatX)
    ydata = mnist["y"]

    Ntrain = 1000 if args.unittest else 10000
    Xtrain = Xdata[0:Ntrain]
    ytrain = ydata[0:Ntrain]
    sortinds = np.random.permutation(Ntrain)
    Xtrain = Xtrain[sortinds]
    ytrain = ytrain[sortinds]
    batch_size = 128
    cgt.update_config(backend="native")

    # Make symbolic variables
    # -----------------------

    def build_fc_return_loss(X, y):
        """
github joschu / cgt / examples / broken / caffe2cgt.py
            pad = (param.pad, param.pad) if param.HasField("pad")\
                else (param.pad_h, param.pad_w)
            output = [nn.pool(pool_type, X, stride, kernel, pad)]
        elif layer.type == "InnerProduct":
            X = inputs[0]
            if X.ndim == 4:
                X = cgt.reshape(X, [X.shape[0], X.shape[1]*X.shape[2]*X.shape[3]] )
            param = layer.inner_product_param
            nchanin = infer_shape(X)[1]
            Wshape = (param.num_output, nchanin)
            Wname = layer.param[0].name or layer.name+":W"
            Wval = np.empty(Wshape, dtype=cgt.floatX)
            W = name2node[Wname] = cgt.shared(Wval, name=Wname, fixed_shape_mask="all")
            bshape = (1, param.num_output)
            bname = layer.param[1].name or layer.name+":b"
            bval = np.empty(bshape, dtype=cgt.floatX)
            b = name2node[bname] = cgt.shared(bval, name=bname, fixed_shape_mask="all")
            yname = layer.top[0]
            output = [cgt.broadcast("+", X.dot(W), b, "xx,1x")]
        elif layer.type == "ReLU":
            output = [nn.rectify(inputs[0])]
        elif layer.type == "Softmax":
            output = [nn.softmax(inputs[0])]
        elif layer.type == "LRN":
            # XXX needs params
            param = layer.lrn_param
            output = [nn.lrn(inputs[0], param.alpha, param.beta, param.local_size)]
        elif layer.type == "Concat":
            param = layer.concat_param
            output = [cgt.concatenate(inputs, param.concat_dim)]
        elif layer.type == "Dropout":
            output = [nn.dropout(inputs[0])]
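The InnerProduct branch above shows the general recipe for parameters: allocate a numpy array in cgt.floatX and wrap it in cgt.shared. The same recipe in isolation (shape and name are illustrative):

Wval = np.zeros((256, 784), dtype=cgt.floatX)
W = cgt.shared(Wval, name="fc1:W", fixed_shape_mask="all")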
github joschu / cgt / examples / demo_mnist.py
        w2 = init_weights(64, 32, 3, 3)
        w3 = init_weights(128, 64, 3, 3)
        w4 = init_weights(128 * 2 * 2, 625)
        w_o = init_weights(625, 10)
        pofy_drop = convnet_model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden)
        pofy_nodrop = convnet_model(X, w, w2, w3, w4, w_o, 0., 0.)
        params = [w, w2, w3, w4, w_o]
    else:
        raise RuntimeError("Unreachable")

    cost_drop = -cgt.mean(categorical.loglik(y, pofy_drop))
    updates = rmsprop_updates(cost_drop, params, stepsize=args.stepsize)

    y_nodrop = cgt.argmax(pofy_nodrop, axis=1)
    cost_nodrop = -cgt.mean(categorical.loglik(y, pofy_nodrop))
    err_nodrop = cgt.cast(cgt.not_equal(y_nodrop, y), cgt.floatX).mean()

    train = cgt.function(inputs=[X, y], outputs=[], updates=updates)
    computeloss = cgt.function(inputs=[X, y], outputs=[err_nodrop, cost_nodrop])

    batch_size = 128

    from cgt.tests import gradcheck_model
    if args.grad_check:
        cost_nodrop = cgt.core.clone(cost_nodrop, {X: Xtrain[:1], y: ytrain[:1]})
        print "doing gradient check..."
        print "------------------------------------"
        gradcheck_model(cost_nodrop, params[0:1])
        print "success!"
        return
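The error-rate line above relies on a common cgt.floatX idiom: a comparison yields an integer-like tensor, and casting it to cgt.floatX lets .mean() produce a floating-point error rate. The same idiom in isolation (ypred is an illustrative name):

errs = cgt.cast(cgt.not_equal(ypred, y), cgt.floatX)
err_rate = errs.mean()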
github joschu / cgt / cgt / nn.py
def conv2d(x_BKRC, f_LKrc, kernelshape, pad=(0,0), stride=(1,1)):
    devtype = cgt.get_config()["default_device"].devtype
    L,K,r,c = f_LKrc.shape
    if devtype == "gpu":        
        b_1K11 = cgt.zeros((1,L,1,1), cgt.floatX)
        return core.Result(cudnn_ops.CudnnConvForward(pad[0],pad[1],stride[0],stride[1]), [x_BKRC, f_LKrc, b_1K11])
    else:
        assert devtype == "cpu"
        col_BmnZ = im2col(x_BKRC, kernelshape, pad, stride)
        f_LZ = f_LKrc.reshape([L, K*r*c])
        B,m,n,Z = col_BmnZ.shape
        col_Bmn_Z = col_BmnZ.reshape([B*m*n, Z])
        col_Bmn_L = core.Result(core.Mul22(False,True), [col_Bmn_Z, f_LZ])
        return col_Bmn_L.reshape([B,m,n,L]).transpose([0,3,1,2])
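A hedged usage sketch for the signature above, with an 8-filter 3x3 kernel in cgt.floatX; cgt.tensor4 is assumed here by analogy with the cgt.tensor3 call in the benchmark example:

X = cgt.tensor4("X")   # (batch, channels, rows, cols)
f = cgt.shared(np.zeros((8, 1, 3, 3), dtype=cgt.floatX))
Y = nn.conv2d(X, f, kernelshape=(3, 3), pad=(1, 1))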
github joschu / cgt / examples / demo_char_rnn.py
def initialize_hiddens(n):
    return [np.zeros((n, args.size_mem), cgt.floatX) for _ in xrange(get_num_hiddens(args.arch, args.n_layers))]
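A brief usage note: the helper returns one (n, args.size_mem) zero array per hidden layer, all in cgt.floatX, so the initial recurrent state matches the compiled graph's precision; e.g. hiddens = initialize_hiddens(64) for a batch of 64 sequences.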
github joschu / cgt / cgt / api.py
def set_precision(prec):
    """
    prec in {"half", "single", "double", "quad"}
    globally set floating point precision for float and complex types
    """
    assert prec in ("half", "single", "double", "quad")
    if prec == "half":
        cgt.floatX = 'f2'
        cgt.complexX = None
        utils.warn("half precision not yet supported")
    elif prec == "single":
        cgt.floatX = 'f4'
        cgt.complexX = 'c8'
    elif prec == "double":
        cgt.floatX = 'f8'
        cgt.complexX = 'c16'
    elif prec == "quad":
        cgt.floatX = 'f16'
        cgt.complexX = 'c32'
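A usage sketch based directly on the function above: set_precision rebinds cgt.floatX (and cgt.complexX), so call it before any data or shared variables are created in the old precision.

import cgt

cgt.set_precision("double")
assert cgt.floatX == 'f8' and cgt.complexX == 'c16'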
github joschu / cgt / examples / demo_variational_autoencoder.py
    decode = cgt.function(
        [newz],
        newy
    )

    S = (28, 28)  # size of one decoded MNIST image
    M = 20        # the manifold is an M x M grid of decoded images
    manifold = np.zeros((S[0]*M, S[1]*M), dtype=cgt.floatX)

    for z1 in xrange(M):
        for z2 in xrange(M):
            print z1, z2
            z = np.zeros((1, 2))
            # pass unit square through inverse Gaussian CDF
            z[0, 0] = norm.ppf(z1 * 1.0/M + 1.0/(M * 2))
            z[0, 1] = norm.ppf(z2 * 1.0/M + 1.0/(M * 2))
            z = np.array(z, dtype=cgt.floatX)
            x_hat = decode(z)
            x_hat = x_hat.reshape(S)
            manifold[z1 * S[0]:(z1 + 1) * S[0],
                     z2 * S[1]:(z2 + 1) * S[1]] = x_hat

    plt.imshow(manifold, cmap="Greys_r")
    plt.axis("off")
    plt.show()
github joschu / cgt / examples / param_collection.py
def set_value_flat(self, theta):
        theta = theta.astype(cgt.floatX)
        arrs = []
        n = 0
        # slice theta into one array per parameter shape
        for shape in self.get_shapes():
            size = np.prod(shape)
            arrs.append(theta[n:n+size].reshape(shape))
            n += size
        assert theta.size == n
        self.set_values(arrs)
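A hedged usage sketch: the flat vector must contain exactly as many entries as the collection's shapes imply, which the assert above enforces; the required size can be computed from the same get_shapes() call (pc stands for a parameter-collection instance):

total = sum(np.prod(shape) for shape in pc.get_shapes())
pc.set_value_flat(np.zeros(total, dtype=cgt.floatX))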