def test_devices(self):
    cgt.set_precision('single')
    N = 10
    K = 3
    Xval = np.random.randn(N, K)
    wval = np.random.randn(K)
    bval = np.random.randn()
    yval = np.random.randn(N)
    # Place the input matrix on the GPU; the other shared variables keep the
    # default placement.
    X_nk = cgt.shared(Xval, "X", device=cgt.Device(devtype='gpu'))
    y_n = cgt.shared(yval, "y")
    w_k = cgt.shared(wval, "w")
    b = cgt.shared(bval, name="b")
    print "bval", bval
    ypred = cgt.dot(cgt.square(X_nk), w_k) + b
    err = cgt.sum(cgt.square(ypred - y_n))
    g = cgt.grad(err, [w_k, b])
    outputs = [err]
    def devfn(node):
        # Pin the final reduction to the CPU; other nodes keep their defaults.
        if isinstance(node, cgt.Result) and node.op == err.op:  # XXX add fn for this
            return cgt.Device(devtype="cpu")
    func = cgt.VarSizeFunc([], outputs, devfn=devfn)
    def writedev(node, o):
        o.write(" | device: %s" % func.node2device[node])
    cgt.print_tree(func.outputs, nodefn=writedev)
    print "ready..."
    numerr = func()
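# A hedged variant of the placement callback above: returning a Device appears
# to pin a node, while returning None leaves the default placement (inferred
# from this test, not verified against CGT's documentation).
def devfn_all_cpu(node):
    if isinstance(node, cgt.Result):
        return cgt.Device(devtype="cpu")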
def broadcast(opname, x, y, bcpat):
    # Symbolic nodes go through cgt.broadcast with the given pattern; plain
    # numeric inputs fall back to ordinary Python/numpy arithmetic.
    return cgt.broadcast(opname, x, y, bcpat) if isinstance(x, core.Node) else eval("x %s y" % opname)
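# Hedged usage sketch of the helper above. The pattern string format is an
# assumption (one comma-separated entry per argument, with '1' marking an axis
# to broadcast):
a = cgt.matrix('a')   # shape (n, k)
r = cgt.matrix('r')   # shape (1, k), to be broadcast along rows
s = broadcast("+", a, r, "xx,1x")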
outchans = f.shape[0]
try:
    import scipy.signal
except ImportError:
    print "skipping because we don't have scipy.signal"
    return
out = np.zeros((batchsize, outchans, x.shape[2]+filtrows-1, x.shape[3]+filtcols-1))
for b in xrange(x.shape[0]):
    for inchan in xrange(x.shape[1]):
        for outchan in xrange(outchans):
            # Reference implementation: flip the filter and accumulate a
            # 'full' 2D convolution over input channels.
            out[b, outchan] += scipy.signal.convolve2d(x[b, inchan], f[outchan, inchan][::-1, ::-1], mode='full')
cgt.set_precision('double')
f = cgt.function([], nn.conv2d(cgt.constant(x), cgt.constant(f), kernelshape=(filtrows,filtcols), pad=(filtrows-1, filtcols-1)))
out1 = f()
np.testing.assert_allclose(out, out1)
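# Hedged sanity note: with pad=(filtrows-1, filtcols-1), nn.conv2d should
# reproduce scipy's mode='full' output shape as well as its values.
assert out1.shape == out.shape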
cgt.utils.colorprint(cgt.utils.Color.YELLOW,"Testing %s(%s)\n"%(f.__name__, types))
sy_inputs = map(tensor_like, nu_inputs)
for (i, sy) in enumerate(sy_inputs):
    sy.name = "x%i" % i
sy_result = f(*sy_inputs)
def maybeprint(msg):
    if DISPLAY: print msg
maybeprint("Function:")
if DISPLAY: cgt.print_tree([sy_result])
f_cgt = cgt.function(sy_inputs, sy_result)
sy_grads = cgt.grad(sy_result, sy_inputs)
gradf_cgt = cgt.function(sy_inputs, sy_grads)
sy_result_simple = core.simplify([sy_result])
sy_grads_simple = core.simplify(sy_grads)
maybeprint("Gradient:")
if DISPLAY: cgt.print_tree(sy_grads)
maybeprint("Gradient after simplification:")
if DISPLAY: cgt.print_tree(sy_grads_simple)
out_true = f(*nu_inputs)
out_cgt = f_cgt(*nu_inputs)
grads_true = gradients_affine(f_cgt, nu_inputs, h=1e-4 if "max" in f.__name__ else 1e-1)
grads_cgt = gradf_cgt(*nu_inputs)
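# The snippet stops before actually comparing the two gradients; a hedged
# completion (the tolerance is an arbitrary choice, not from the original):
for g_true, g_cgt in zip(grads_true, grads_cgt):
    np.testing.assert_allclose(g_true, g_cgt, atol=1e-4)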
# N, K and the random data below are assumed to match the other snippets here.
N = 10
K = 3
Xval = np.random.randn(N, K)
wval = np.random.randn(K)
bval = np.random.randn()
yval = np.random.randn(N)
X_nk = cgt.shared(Xval, "X")
y_n = cgt.shared(yval, "y")
w_k = cgt.shared(wval, "w")
b = cgt.shared(bval, name="b")
ypred = cgt.dot(X_nk, w_k) + b
err = cgt.sum(cgt.square(ypred - y_n))
g = cgt.grad(err, [w_k, b])
g = core.simplify(g)
pars = [w_k, b]
flatx = nn.setup_contiguous_storage(pars)
f = cgt.function([], [err,cgt.flatcat(g)])
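# Hedged usage sketch: setup_contiguous_storage is assumed to hand back a flat
# numpy view of the parameters, so a plain gradient-descent step can update
# them in place between calls to f.
stepsize = 0.01
for _ in xrange(100):
    errval, gradval = f()
    flatx -= stepsize * gradval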
def runTest(self):
    for x in (cgt.scalar('x'), cgt.vector('x'), cgt.matrix('x')):
        for cls in (SinCos, SinCos2):
            # SinCos/SinCos2 are multi-output ops; unpack splits the result
            # into its components.
            y, z = core.unpack(core.Result(cls(), [x]))
            xnum = np.ones((3,)*x.ndim, cgt.floatX)
            correct = (np.sin(xnum), np.cos(xnum))
            yznum = cgt.numeric_eval([y, z], {x: xnum})
            np.testing.assert_allclose(yznum, correct)
            f = cgt.function([x], [y, z])
            np.testing.assert_allclose(f(xnum), correct)
N = 10
K = 3
Xval = np.random.randn(N,K)
wval = np.random.randn(K)
bval = np.random.randn()
yval = np.random.randn(N)
X_nk = cgt.matrix("X")
y_n = cgt.vector("y")
w_k = cgt.vector("w")
b = cgt.scalar(name="b")
ypred = cgt.dot(X_nk, w_k) + b
err = cgt.sum(cgt.square(ypred - y_n))
g = cgt.grad(err, [w_k, b])
g_simple,an = cgt.core.simplify_and_analyze(g)
print "Loss function:"
cgt.print_tree([err])
print "Gradient:"
cgt.print_tree(g)
print "Gradient simplified"
cgt.print_tree(g_simple, nodefn=lambda node,o: o.write(" " + an["node2hash"][node][:5]))
print "-------"
d = {X_nk : Xval, w_k : wval, b : bval, y_n : yval}
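# Hedged follow-up: the feed dict above can drive numeric evaluation of the
# loss and the simplified gradient (cgt.numeric_eval is used this way
# elsewhere in these tests).
errval, = cgt.numeric_eval([err], d)
gvals = cgt.numeric_eval(g_simple, d)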
if key == "conj":
print "skipping conj"
continue
utils.colorprint(utils.Color.YELLOW, "Testing %s\n"%key)
if cls == core.ElwiseUnary:
    n_in = 1
    op = cls(key)
else:
    n_in = 2
    op = cls(key, (True, True))
inputvars = vars[0:n_in]
inputvals = vals[0:n_in]
out = core.Result(op, inputvars)
f = cgt.function(inputvars, out)
try:
    grads = cgt.grad(out, inputvars)
except core.NonDifferentiable:
    print "nondiff"
    continue
if DISPLAY:
    print "Function:"
    cgt.print_tree(out)
    print "Gradient original:"
    cgt.print_tree(grads)
    print "Gradient simplified:"
grads_simple = core.simplify(grads)
if DISPLAY: cgt.print_tree(grads_simple)
gradf = cgt.function(inputvars, grads)
nugrad = numeric_grad(lambda li: f(*li), inputvals) #pylint: disable=W0640
cgtgrad = gradf(*inputvals)
np.testing.assert_almost_equal(nugrad,cgtgrad,decimal=6)
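# Hypothetical sketch of what a central-difference checker like numeric_grad
# might compute (the real helper is defined elsewhere in the test suite; this
# assumes float ndarray inputs and a scalar-valued func):
def numeric_grad_sketch(func, inputvals, eps=1e-6):
    grads = []
    for x in inputvals:
        g = np.zeros_like(x)
        xflat, gflat = x.ravel(), g.ravel()
        for j in xrange(xflat.size):
            orig = xflat[j]
            xflat[j] = orig + eps
            fplus = func(inputvals)
            xflat[j] = orig - eps
            fminus = func(inputvals)
            xflat[j] = orig  # restore before moving to the next coordinate
            gflat[j] = (fplus - fminus) / (2 * eps)
        grads.append(g)
    return grads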