How to use the cgt.shared function in cgt

To help you get started, we’ve selected a few cgt.shared examples based on popular ways it is used in public projects.
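Before digging into the project excerpts, here is a minimal sketch of the basic cgt.shared pattern: the shared variable wraps a NumPy value that persists across compiled function calls and can be mutated through an updates list. The cgt.vector and cgt.function calls below follow the Theano-style API used throughout the examples on this page; treat the exact signatures as assumptions rather than documented API.

import cgt
import numpy as np

# A shared variable holds mutable state (here a weight vector) in the graph.
w = cgt.shared(np.zeros(3, dtype=cgt.floatX), name="w")

x = cgt.vector("x")          # symbolic input (assumed Theano-style constructor)
y = cgt.dot(x, w)            # expression that reads the shared value

# Each (shared, new_value) pair in `updates` mutates the shared variable
# every time the compiled function is called.
f = cgt.function([x], y, updates=[(w, w + x)])
f(np.ones(3, dtype=cgt.floatX))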


github joschu / cgt / test / _test_devices.py
def test_devices(self):
        cgt.set_precision('single')
        N = 10
        K = 3

        Xval = np.random.randn(N,K)
        wval = np.random.randn(K)
        bval = np.random.randn()
        yval = np.random.randn(N)

        X_nk = cgt.shared(Xval, "X", device=cgt.Device(devtype='gpu'))
        y_n = cgt.shared(yval, "y")
        w_k = cgt.shared(wval, "w")
        b = cgt.shared(bval, name="b")

        print "bval",bval

        ypred = cgt.dot(cgt.square(X_nk), w_k) + b

        err = cgt.sum(cgt.square(ypred - y_n))
        g = cgt.grad(err, [w_k, b])
        outputs = [err]
        def devfn(node):
            if isinstance(node, cgt.Result) and node.op == err.op: # XXX add fn for this
                return cgt.Device(devtype="cpu")

        func = cgt.VarSizeFunc([], outputs, devfn=devfn)
github joschu / cgt / examples / demo_neural_turing_machine.py
def make_ntm_initial_states(opt):
    n, m, h, b = opt.n, opt.m, opt.h, opt.b
    M_1nm = cgt.shared(.1*nr.randn(1,n,m))
    winit_1Hn = cgt.shared(.1*nr.rand(1,2*h,n))
    winit_1Hn = sum_normalize2(cgt.exp(winit_1Hn))
    rinit_1hm = cgt.shared(np.zeros((1,h,m)))
    return [cgt.repeat(arr, b, axis=0) for arr in (M_1nm, winit_1Hn, rinit_1hm)]
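The Neural Turing Machine example above stores each learnable initial state as a cgt.shared array with a leading batch dimension of 1 and then tiles it to the actual batch size with cgt.repeat. A stripped-down sketch of the same pattern, with illustrative shapes and names that are not taken from the repo:

import cgt
import numpy as np
import numpy.random as nr

batch_size, mem_rows, mem_cols = 4, 8, 3                # illustrative sizes
# One learnable initial memory matrix, stored with batch dimension 1.
M_init = cgt.shared(0.1 * nr.randn(1, mem_rows, mem_cols), name="M_init")
# Tile it across the batch so every sequence starts from the same state.
M_batch = cgt.repeat(M_init, batch_size, axis=0)        # shape (4, 8, 3)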
github joschu / cgt / cgt / nn.py
    params : list of cgt shared variables
        We generate update expressions w.r.t. these variables.
    learning_rate : float
        Tunes the size of the update step.
    momentum : float
        Tunes the weight given to the velocity term.

    Returns
    -------
    list of tuples of the form (param, new_param) and (velocity, new_velocity)
    """
    updates = []
    grads = cgt.grad(cost, params)
    for param, grad in zip(params, grads):
        assert isinstance(param.op, core.GetData)
        velocity = cgt.shared(np.zeros(param.op.get_shape(), dtype=param.dtype))
        new_velocity = mu * velocity - learning_rate * grad
        new_param = param + new_velocity
        updates.append((velocity, new_velocity))
        updates.append((param, new_param))

    return updates
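This helper, like the rmsprop_updates examples below, returns a list of (shared variable, new expression) pairs. In cgt such a list is typically handed to cgt.function via its updates argument so the shared variables are mutated on every call. Below is a minimal sketch of that pattern using a plain SGD step, so nothing depends on the truncated signature above; the placeholder constructors and the cgt.function signature are assumptions consistent with cgt's Theano-style API.

import cgt
import numpy as np

X = cgt.matrix("X")                                     # minibatch of inputs (assumed constructor)
y = cgt.vector("y")                                     # targets
w = cgt.shared(np.zeros(5, dtype=cgt.floatX), name="w")
b = cgt.shared(np.array(0.0), name="b")

loss = cgt.sum(cgt.square(cgt.dot(X, w) + b - y))
grads = cgt.grad(loss, [w, b])
updates = [(p, p - 1e-3 * g) for p, g in zip([w, b], grads)]  # plain SGD step

# Every call computes the loss and then applies the updates to w and b.
train = cgt.function([X, y], loss, updates=updates)
train(np.random.randn(10, 5).astype(cgt.floatX),
      np.random.randn(10).astype(cgt.floatX))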
github joschu / cgt / examples / demo_cifar.py
def rmsprop_updates(cost, params, stepsize=0.001, rho=0.9, epsilon=1e-6):
    grads = cgt.grad(cost, params)
    updates = []
    for p, g in zip(params, grads):
        acc = cgt.shared(p.op.get_value() * 0.)
        acc_new = rho * acc + (1 - rho) * cgt.square(g)
        gradient_scaling = cgt.sqrt(acc_new + epsilon)
        g = g / gradient_scaling
        updates.append((acc, acc_new))
        updates.append((p, p - stepsize * g))
    return updates
github joschu / cgt / cgt / nn.py
def parameter(val, name=None, device=None):
    fixed_shape_mask = "all"
    out = cgt.shared(val, name=name, device=device, fixed_shape_mask=fixed_shape_mask)
    out.props["is_parameter"] = True
    return out
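parameter is a thin wrapper over cgt.shared that fixes the full shape and tags the node with the is_parameter prop, which optimization code can use to collect trainable variables. A hedged usage sketch follows; the cgt.matrix placeholder and the nn.get_parameters helper named at the end are assumed from cgt's Theano-style API, so treat their exact names and signatures as assumptions.

import cgt
import numpy as np
from cgt import nn

# Trainable weights: cgt.shared nodes with a fully fixed shape and
# props["is_parameter"] set to True.
W = nn.parameter(0.01 * np.random.randn(784, 10).astype(cgt.floatX), name="W")
b = nn.parameter(np.zeros((1, 10), dtype=cgt.floatX), name="b")

X = cgt.matrix("X")
logits = cgt.broadcast("+", cgt.dot(X, W), b, "xx,1x")
loss = cgt.sum(cgt.square(logits))        # stand-in objective, for illustration only

params = nn.get_parameters(loss)          # assumed helper that gathers is_parameter nodes
grads = cgt.grad(loss, params)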
github joschu / cgt / examples / broken / caffe2cgt.py
else (param.pad_h, param.pad_w)
            output = [nn.pool(pool_type, X, stride, kernel, pad)]
        elif layer.type == "InnerProduct":
            X = inputs[0]
            if X.ndim == 4:
                X = cgt.reshape(X, [X.shape[0], X.shape[1]*X.shape[2]*X.shape[3]])
            param = layer.inner_product_param
            nchanin = infer_shape(X)[1]
            Wshape = (param.num_output, nchanin)
            Wname = layer.param[0].name or layer.name+":W"
            Wval = np.empty(Wshape, dtype=cgt.floatX)
            W = name2node[Wname] = cgt.shared(Wval, name=Wname, fixed_shape_mask="all")
            bshape = (1, param.num_output)
            bname = layer.param[1].name or layer.name+":b"
            bval = np.empty(bshape, dtype=cgt.floatX)
            b = name2node[bname] = cgt.shared(bval, name=bname, fixed_shape_mask="all")
            yname = layer.top[0]
            output = [cgt.broadcast("+", X.dot(W), b, "xx,1x")]
        elif layer.type == "ReLU":
            output = [nn.rectify(inputs[0])]
        elif layer.type == "Softmax":
            output = [nn.softmax(inputs[0])]
        elif layer.type == "LRN":
            # XXX needs params
            param = layer.lrn_param
            output = [nn.lrn(inputs[0], param.alpha, param.beta, param.local_size)]
        elif layer.type == "Concat":
            param = layer.concat_param
            output = [cgt.concatenate(inputs, param.concat_dim)]
        elif layer.type == "Dropout":
            output = [nn.dropout(inputs[0])]
        elif layer.type == "SoftmaxWithLoss":
github joschu / cgt / examples / demo_mnist.py
def rmsprop_updates(cost, params, stepsize=0.001, rho=0.9, epsilon=1e-6):
    grads = cgt.grad(cost, params)
    updates = []
    for p, g in zip(params, grads):
        acc = cgt.shared(p.op.get_value() * 0.)
        acc_new = rho * acc + (1 - rho) * cgt.square(g)
        gradient_scaling = cgt.sqrt(acc_new + epsilon)
        g = g / gradient_scaling
        updates.append((acc, acc_new))
        updates.append((p, p - stepsize * g))
    return updates
github joschu / cgt / examples / broken / caffe2cgt.py
crop_size = tp.crop_size
            chans = len(tp.mean_value)
            dp = layer.data_param
            batch_size = dp.batch_size
            output = [cgt.tensor(dtype=cgt.floatX, ndim=4, name=layer.name, fixed_shape=(batch_size, chans, crop_size, crop_size)),
                      cgt.tensor(dtype='i8', ndim=2, name=layer.name, fixed_shape=(batch_size, 1))]
        elif layer.type == "Convolution":
            X = inputs[0]
            param = layer.convolution_param
            kh,kw = (param.kernel_size, param.kernel_size) if param.HasField("kernel_size")\
                else (param.kernel_h, param.kernel_w)
            nchanin = infer_shape(X)[0]
            Wshape = (param.num_output, nchanin, kh, kw)
            Wname = layer.param[0].name or layer.name+":W"
            Wval = np.empty(Wshape, dtype=cgt.floatX)
            W = name2node[Wname] = cgt.shared(Wval, name=Wname, fixed_shape_mask="all")
            bshape = (1, param.num_output, 1, 1)
            bname = layer.param[1].name or layer.name+":b"
            bval = np.empty(bshape, dtype=cgt.floatX)
            b = name2node[bname] = cgt.shared(bval, name=bname, fixed_shape_mask="all")
            sh,sw = (param.stride, param.stride) if param.HasField("stride")\
                else (param.stride_h, param.stride_w)
            output = [cgt.broadcast("+", nn.conv2d(X, W, subsample=(sh, sw)), b, "xxxx,1x11")]
        elif layer.type == "Pooling":
            param = layer.pooling_param
            X = inputs[0]
            pool_type = {param.MAX : "max", param.AVE : "mean"}[param.pool]
            height_in,width_in = infer_shape(X)[2:4]
            kernel = (param.kernel_size, param.kernel_size) if param.HasField("kernel_size")\
                else (param.kernel_h, param.kernel_w)
            stride = (param.stride, param.stride) if param.HasField("stride")\
                else (param.stride_h, param.stride_w)