def runTest(self):
    cgt.set_precision('double')
    x = cgt.vector()
    y = cgt.square(x)
    # compile the graph for [y + y] into a low-level execution graph
    eg = cgt.execution.the_whole_schmear([x], [y + y], [])
    pprint.pprint(eg.to_json())
    import cycgt2
    interp = cycgt2.cInterpreter(eg)
    print interp(np.array([3, 4, 5, 6], 'f8'))
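
# Sketch (an assumption, not part of this test): the same computation through
# the higher-level cgt.function API, which hides the execution-graph and
# interpreter plumbing used above.
def _sketch_function_api():
    x = cgt.vector('x')
    y = cgt.square(x)
    f = cgt.function([x], y + y)            # compile graph -> python callable
    print f(np.array([3, 4, 5, 6], 'f8'))   # [ 18.  32.  50.  72.]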

def setUp(self):
    cgt.set_precision('double')
    nr.seed(303)

def runTest(self):

def test_devices(self):
    cgt.set_precision('single')
    N = 10
    K = 3
    Xval = np.random.randn(N, K)
    wval = np.random.randn(K)
    bval = np.random.randn()
    yval = np.random.randn(N)
    # place X on the GPU; the remaining shared variables stay on the CPU
    X_nk = cgt.shared(Xval, "X", device=cgt.Device(devtype='gpu'))
    y_n = cgt.shared(yval, "y")
    w_k = cgt.shared(wval, "w")
    b = cgt.shared(bval, name="b")
    print "bval", bval
    ypred = cgt.dot(cgt.square(X_nk), w_k) + b
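
# Sketch (an assumption, not part of the original test): one way the device
# test could verify the result. Shared variables act as implicit inputs, so
# the compiled function takes no arguments.
def _sketch_check_devices(ypred, Xval, wval, bval):
    f = cgt.function([], ypred)
    np.testing.assert_allclose(f(), np.square(Xval).dot(wval) + bval, atol=1e-4)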

def runTest(self):
    cgt.set_precision('double')
    N = 10
    K = 3
    Xval = np.random.randn(N, K)
    wval = np.random.randn(K)
    bval = np.random.randn()
    yval = np.random.randn(N)
    # symbolic inputs for a linear model ypred = X w + b
    X_nk = cgt.matrix("X")
    y_n = cgt.vector("y")
    w_k = cgt.vector("w")
    b = cgt.scalar(name="b")
    ypred = cgt.dot(X_nk, w_k) + b
    # sum-of-squares error
    err = cgt.sum(cgt.square(ypred - y_n))
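
# Sketch (an assumption about where such a test goes next): differentiate the
# loss with cgt.grad and compile loss and gradients into a single callable.
def _sketch_loss_and_grad(X_nk, y_n, w_k, b, err):
    g_w, g_b = cgt.grad(err, [w_k, b])
    return cgt.function([X_nk, y_n, w_k, b], [err, g_w, g_b])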

def runTest(self):
    np.random.seed(0)
    cgt.set_precision('double')
    x = cgt.scalar('x')
    y = cgt.scalar('y')
    z = cgt.scalar('z')
    vars = [x, y, z]  # pylint: disable=W0622
    vals = nr.rand(len(vars)) + 1
    PROB2RESULT = {}
    # iterate over every registered unary and binary elementwise op
    for ((key, _), cls) in it.chain(
            it.izip(core.UNARY_INFO.items(), it.repeat(core.ElwiseUnary)),
            it.izip(core.BINARY_INFO.items(), it.repeat(core.ElwiseBinary))):
        if key == "conj":
            print "skipping conj"
            continue
        utils.colorprint(utils.Color.YELLOW, "Testing %s\n" % key)
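
# Sketch (an assumption about the check performed per op inside this loop): a
# plain numpy central-difference derivative of the kind elementwise-op tests
# are typically compared against.
def _sketch_numeric_derivative(fn, val, eps=1e-6):
    return (fn(val + eps) - fn(val - eps)) / (2 * eps)

# e.g. _sketch_numeric_derivative(np.square, 3.0) is approximately 6.0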

batchsize = x.shape[0]
outchans = f.shape[0]
try:
    import scipy.signal
except ImportError:
    print "skipping because we don't have scipy.signal"
    return
# reference implementation: 'full' 2d convolution, one (batch, channel) pair
# at a time; the kernel is pre-flipped because convolve2d performs a true
# convolution while conv2d computes a cross-correlation
out = np.zeros((batchsize, outchans, x.shape[2] + filtrows - 1, x.shape[3] + filtcols - 1))
for b in xrange(x.shape[0]):
    for inchan in xrange(x.shape[1]):
        for outchan in xrange(outchans):
            out[b, outchan] += scipy.signal.convolve2d(x[b, inchan], f[outchan, inchan][::-1, ::-1], mode='full')
cgt.set_precision('double')
# compile the cgt version with padding chosen to reproduce 'full' mode
# (renamed to fn so the filter array f is not shadowed)
fn = cgt.function([], nn.conv2d(cgt.constant(x), cgt.constant(f), kernelshape=(filtrows, filtcols), pad=(filtrows - 1, filtcols - 1)))
out1 = fn()
# out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
np.testing.assert_allclose(out, out1)
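
# Sketch (an assumption, for intuition about the shapes above): 'full' mode
# slides the kernel over every overlapping position, so an (H, W) image and
# an (R, C) kernel give an (H + R - 1, W + C - 1) output.
def _sketch_full_conv_shape():
    import scipy.signal
    out = scipy.signal.convolve2d(np.ones((4, 4)), np.ones((3, 3)), mode='full')
    assert out.shape == (4 + 3 - 1, 4 + 3 - 1)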

import os.path as osp
import cgt
from cgt import nn
from cgt.core import infer_shape
import numpy as np
from google.protobuf import text_format
from caffe.proto.caffe_pb2 import NetParameter, TRAIN

infile = "/Users/joschu/Src/caffe/examples/mnist/lenet.prototxt"
# infile = "/Users/joschu/Src/caffe/models/bvlc_googlenet/train_val.prototxt"
with open(osp.expanduser(infile), "r") as fh:
    text = fh.read()
net = NetParameter()
text_format.Merge(text, net)  # parse the prototxt into a NetParameter message
name2node = {}                # maps Caffe blob names to cgt nodes
cgt.set_precision('single')
if net.input:  # pylint: disable=E1101
    assert len(net.input) == 1  # pylint: disable=E1101
    name2node[net.input[0]] = cgt.tensor(ndim=4, dtype=cgt.floatX, fixed_shape=tuple(net.input_dim))
# XXX super inefficient
for layer in net.layer:  # pylint: disable=E1101
    if layer.phase == TRAIN:
        print "loading layer %s type=%s in=%s out=%s" % (layer.name, layer.type, layer.bottom, layer.top)
        output = None
        inputs = [name2node[name] for name in layer.bottom]
        if layer.type == "Data":
            tp = layer.transform_param
            crop_size = tp.crop_size
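
# Sketch (an assumption about how the dispatch continues): each remaining
# Caffe layer type maps to a cgt/nn expression over its bottom blobs, with
# the result stored in name2node under each name in layer.top.
def _sketch_dispatch(layer, inputs):
    if layer.type == "ReLU":
        return nn.rectify(inputs[0])  # elementwise max(x, 0)
    raise NotImplementedError("layer type %s" % layer.type)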

def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--grad_check", action="store_true")
    parser.add_argument("--n_batches", type=int, default=1000000)
    parser.add_argument("--profile", action="store_true")
    parser.add_argument("--unittest", action="store_true")
    parser.add_argument("--task", choices=["copy", "reverse_copy", "repeat_copy"], default="copy")
    args = parser.parse_args()
    np.seterr("raise")
    # gradient checking needs extra precision to separate real errors from
    # roundoff, hence quad precision in that mode
    cgt.set_precision("quad" if args.grad_check else "double")
    np.random.seed(0)
    # model parameters
    if args.grad_check:
        # tiny model so finite-difference checking stays cheap
        opt = NTMOpts(
            b=1,  # batch size
            h=1,  # number of heads
            n=2,  # number of memory sites
            m=3,  # dimension at each memory site
            k=4,  # dimension of input
            p=2,  # dimension of output
            ff_hid_sizes=[]
        )
        seq_length = 2
    else:
parser.add_argument("--size_mem", type=int,default=64)
parser.add_argument("--size_batch", type=int,default=64)
parser.add_argument("--n_layers",type=int,default=2)
parser.add_argument("--n_unroll",type=int,default=16)
parser.add_argument("--step_size",type=float,default=.01)
parser.add_argument("--decay_rate",type=float,default=0.95)
parser.add_argument("--n_epochs",type=int,default=20)
parser.add_argument("--arch",choices=["lstm","gru"],default="lstm")
parser.add_argument("--grad_check",action="store_true")
parser.add_argument("--profile",action="store_true")
parser.add_argument("--unittest",action="store_true")
parser.add_argument("--temperature",type=float,default=1)
args = parser.parse_args()
cgt.set_precision("quad" if args.grad_check else "single")
assert args.n_unroll > 1
loader = Loader(args.data_dir,args.size_batch, args.n_unroll, (1.0,0,0))
network, f_loss, f_loss_and_grad, f_step = make_loss_and_grad_and_step(args.arch, loader.size_vocab,
loader.size_vocab, args.size_mem, args.size_batch, args.n_layers, args.n_unroll)
if args.profile: profiler.start()
params = network.get_parameters()
pc = ParamCollection(params)
pc.set_value_flat(nr.uniform(-.1, .1, size=(pc.get_total_size(),)))
def initialize_hiddens(n):
return [np.zeros((n, args.size_mem), cgt.floatX) for _ in xrange(get_num_hiddens(args.arch, args.n_layers))]
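
# Usage sketch (an assumption): fresh zero hidden states are drawn once per
# sequence, e.g.
#     init_hiddens = initialize_hiddens(args.size_batch)
# before stepping the network over the unrolled inputs.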