def cov(self):
    """Biased (divide-by-N) sample covariance of the stored draws."""
    x = self.histogram - self.mean
    return x.T.dot(x) / pm.floatX(self.histogram.shape[0])
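In plain numpy terms, this is the biased (divide-by-N) sample covariance of the stored draws; a small illustrative sketch, not from the source:

import numpy as np

draws = np.random.randn(500, 3)         # stands in for self.histogram
centered = draws - draws.mean(axis=0)   # self.mean is the column-wise mean
cov = centered.T @ centered / draws.shape[0]
assert np.allclose(cov, np.cov(draws, rowvar=False, bias=True))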
# Assumes: OrderedDict, partial, numpy as np, theano, theano.tensor as tt and
# pymc3 as pm are imported; get_or_compute_grads and _get_call_kwargs are
# pymc3-internal helpers.
def adagrad_window(loss_or_grads=None, params=None,
                   learning_rate=0.001, epsilon=0.1, n_win=10):
    """Adagrad updates that accumulate squared gradients over a moving window.

    Parameters
    ----------
    n_win : int
        Number of past steps to calculate scales of parameter gradients.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression
    """
if loss_or_grads is None and params is None:
return partial(adagrad_window, **_get_call_kwargs(locals()))
elif loss_or_grads is None or params is None:
raise ValueError('Please provide both `loss_or_grads` and `params` to get updates')
grads = get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
for param, grad in zip(params, grads):
        # Circular write index into the gradient window
        i = theano.shared(pm.floatX(0))
        i_int = i.astype('int32')
        value = param.get_value(borrow=True)
        # Buffer holding the last n_win squared gradients, one slot per step
        accu = theano.shared(
            np.zeros(value.shape + (n_win,), dtype=value.dtype))
        # Write the current squared gradient into slot i of the window
        accu_new = tt.set_subtensor(accu[..., i_int], grad ** 2)
        # Advance the write index, wrapping to 0 after n_win steps
        i_new = tt.switch((i + 1) < n_win, i + 1, 0)
        updates[accu] = accu_new
        updates[i] = i_new
        # Scale the step by the windowed sum of squared gradients
        accu_sum = accu_new.sum(axis=-1)
        updates[param] = param - (learning_rate * grad /
                                  tt.sqrt(accu_sum + epsilon))
return updates
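A hedged usage sketch: the quadratic loss and shared parameter below are illustrative, not from the source, and assume theano and pymc3 import successfully.

import numpy as np
import theano
import theano.tensor as tt
import pymc3 as pm

w = theano.shared(pm.floatX(np.zeros(3)), name="w")
target = pm.floatX(np.array([1.0, 2.0, 3.0]))
loss = tt.sum((w - target) ** 2)

# Compile one optimization step from the returned update dictionary
step = theano.function([], loss, updates=adagrad_window(loss, [w], learning_rate=0.1))
for _ in range(200):
    step()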
def build_model():
    """Partial-pooling model for the Efron-Morris (1975) batting data."""
    data = np.loadtxt(pm.get_data('efron-morris-75-data.tsv'), delimiter="\t",
                      skiprows=1, usecols=(2, 3))
    atbats = pm.floatX(data[:, 0])
    hits = pm.floatX(data[:, 1])
    N = len(hits)
# we want to bound the kappa below
BoundedKappa = pm.Bound(pm.Pareto, lower=1.0)
with pm.Model() as model:
phi = pm.Uniform('phi', lower=0.0, upper=1.0)
kappa = BoundedKappa('kappa', alpha=1.0001, m=1.5)
        thetas = pm.Beta('thetas', alpha=phi * kappa, beta=(1.0 - phi) * kappa, shape=N)
ys = pm.Binomial('ys', n=atbats, p=thetas, observed=hits)
return model
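A hedged usage sketch (sampler settings are illustrative; the TSV ships with pymc3's example data via pm.get_data):

model = build_model()
with model:
    trace = pm.sample(1000, tune=1000)
print(trace['phi'].mean())  # posterior mean of the pooled batting ability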
def randidx(self, size=None):
    """Draw `size` random int32 indices into the histogram's rows."""
    if size is None:
        size = (1,)
    elif isinstance(size, tt.TensorVariable):
        if size.ndim < 1:
            size = size[None]  # promote scalar to 1d
        elif size.ndim > 1:
            raise ValueError("size ndim should be no more than 1d")
    else:
        size = tuple(np.atleast_1d(size))
    # Uniform draws in [0, N - eps), floored by the int cast, give
    # uniform indices over the N stored draws
    return self._rng.uniform(
        size=size,
        low=pm.floatX(0),
        high=pm.floatX(self.histogram.shape[0]) - pm.floatX(1e-16),
    ).astype("int32")
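The int32 cast floors the uniform draws, so each value is an index approximately uniform over 0..N-1; the 1e-16 offset guards the upper endpoint. The same trick in plain numpy (illustrative, not from the source):

import numpy as np

rng = np.random.default_rng(0)
n = 10
idx = rng.uniform(low=0.0, high=n - 1e-16, size=(5,)).astype("int32")
# idx holds integers in [0, n)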
def __call__(self, x):
neg_value = np.float64(self.logp_func(pm.floatX(x)))
    value = -1.0 * nan_to_high(neg_value)
    grad = None  # default when gradients are disabled or non-finite
    if self.use_gradient:
neg_grad = self.dlogp_func(pm.floatX(x))
if np.all(np.isfinite(neg_grad)):
self.previous_x = x
grad = nan_to_num(-1.0 * neg_grad)
grad = grad.astype(np.float64)
else:
self.previous_x = x
grad = None
if self.n_eval % 10 == 0:
self.update_progress_desc(neg_value, grad)
if self.n_eval > self.maxeval:
self.update_progress_desc(neg_value, grad)
raise StopIteration
    self.n_eval += 1
    # The optimizer expects (value, grad) when a gradient is used
    if self.use_gradient:
        return value, grad
    return value
def create_shared_params(self, start=None):
    if start is None:
        start = self.model.test_point
else:
start_ = start.copy()
update_start_vals(start_, self.model.test_point, self.model)
start = start_
if self.batched:
start = start[self.group[0].name][0]
else:
start = self.bij.map(start)
rho = np.zeros((self.ddim,))
if self.batched:
start = np.tile(start, (self.bdim, 1))
rho = np.tile(rho, (self.bdim, 1))
return {
"mu": theano.shared(pm.floatX(start), "mu"),
"rho": theano.shared(pm.floatX(rho), "rho"),
}
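For context: mu and rho are the shared variational parameters of the mean-field group, and in pymc3 rho parameterizes the standard deviation through a softplus. A small numpy sketch of that mapping (illustrative):

import numpy as np

rho = np.zeros(3)              # the zero initialization used above
sigma = np.log1p(np.exp(rho))  # softplus(0) ~= 0.693, the implied initial sd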
def rslice(self, total, size, seed):
    """Random slice of `size` row indices drawn from range(total)."""
    if size is None:
        return slice(None)  # no minibatching: take all rows
elif isinstance(size, int):
rng = pm.tt_rng(seed)
Minibatch.RNG[id(self)].append(rng)
        # Floored uniform draws in [0, total): random row indices
        return (rng
                .uniform(size=(size,), low=0.0, high=pm.floatX(total) - 1e-16)
                .astype('int64'))
else:
raise TypeError('Unrecognized size type, %r' % size)
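A hedged usage sketch of pm.Minibatch, which draws a fresh random row slice like this on every evaluation (shapes and batch size are illustrative):

import numpy as np
import pymc3 as pm

data = np.random.rand(1000, 5).astype('float32')
batch = pm.Minibatch(data, batch_size=100)
print(batch.eval().shape)  # (100, 5); rows are resampled on each eval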
def pandas_to_array(data):
    if hasattr(data, 'values'):  # pandas DataFrame/Series
        if data.isnull().any().any():  # missing values: keep them as a mask
            ret = np.ma.MaskedArray(data.values, data.isnull().values)
        else:
            ret = data.values
    elif hasattr(data, 'mask'):  # numpy masked array
        if data.mask.any():  # keep the mask so missing values can be imputed
            ret = data
        else:  # empty mask: a plain filled array suffices
            ret = data.filled()
    elif isinstance(data, theano.gof.graph.Variable):  # already symbolic
        ret = data
    elif sps.issparse(data):  # scipy sparse matrices pass through
        ret = data
    elif isgenerator(data):  # wrap python generators for minibatch use
        ret = generator(data)
else:
ret = np.asarray(data)
return pm.floatX(ret)
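A hedged usage sketch (assumes pandas is installed and the function above is in scope; values are illustrative):

import numpy as np
import pandas as pd

df = pd.DataFrame({"x": [1.0, 2.0, np.nan]})
masked = pandas_to_array(df)          # NaN present: returned as a masked array
plain = pandas_to_array(df.dropna())  # no NaN: plain ndarray at floatX precision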