        # Update current W
        x['net']['weights']['W'] = W

    def update(self, x, n):
        """ Sample a single column of the network (all the incoming
            coupling filters). This is a parallelizable chunk.
        """
        # Precompute the filtered currents from other GLMs
        I_bias, I_stim, I_imp = self._precompute_currents(x, n)
        self._sample_column_of_A(n, x, I_bias, I_stim, I_imp)
        self._sample_column_of_W(n, x, I_bias, I_stim, I_imp)
        return x
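# A minimal sketch of how such a parallelizable column update is typically
# driven from a serial Gibbs sweep (`network_update`, `x`, and `N` are
# hypothetical names standing in for the surrounding pyglm driver code):
#
#     for n in range(N):
#         x = network_update.update(x, n)   # resample column n of A and W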
class LatentLocationUpdate(MetropolisHastingsUpdate):
    """
    Gibbs sample the parameters of a latent distance model, namely the
    latent locations (if they are not given) and the distance scale.
    """
    def __init__(self):
        super(LatentLocationUpdate, self).__init__()

        # Use HMC if the locations are continuous;
        # otherwise, use a Metropolis-Hastings update.
        self.avg_accept_rate = 0.9
        self.step_sz = 0.001

    def preprocess(self, population):
        self.N = population.model['N']

        # Get the location model(s)
class SharedTuningCurveUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample the shared tuning curves.
    """
    def __init__(self):
        self.n_steps = 2
        self.avg_accept_rate = 0.9
        self.step_sz = 0.1

    def preprocess(self, population):
        self.population = population
        self.glm = self.population.glm
        self.N = population.N

        # Get the shared tuning curve component
        from pyglm.components.latent import LatentTypeWithTuningCurve
        self.tc_model = None
        x['latent'][latent_type.name]['Y'] = Y

        # Update alpha under its conjugate Dirichlet prior
        from pyglm.components.priors import Dirichlet
        if isinstance(latent_type.alpha_prior, Dirichlet):
            suffstats = latent_type.alpha_prior.alpha0.get_value()
            suffstats += np.bincount(Y, minlength=R)
            alpha = np.random.dirichlet(suffstats)
            x['latent'][latent_type.name]['alpha'] = alpha
        else:
            import warnings
            warnings.warn('Cannot update alpha: only Dirichlet priors are supported')

        return x
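# The alpha update above relies on Dirichlet-multinomial conjugacy: if
# alpha ~ Dir(alpha0) and the labels Y are drawn from Discrete(alpha), the
# posterior is Dir(alpha0 + counts). A tiny self-contained illustration
# (R = 3 types and the labels are arbitrary choices for the example):
#
#     import numpy as np
#     R = 3
#     alpha0 = np.ones(R)                      # symmetric Dirichlet prior
#     Y = np.array([0, 0, 1, 2, 2, 2])         # sampled type labels
#     posterior = alpha0 + np.bincount(Y, minlength=R)
#     alpha = np.random.dirichlet(posterior)   # one draw from the posterior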
class LatentLocationAndTypeUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to jointly sample discrete latent locations on a grid
    along with the type of each neuron.
    """
    def __init__(self):
        raise NotImplementedError('Joint update of location and type has not yet been implemented!')

    def preprocess(self, population):
        self.N = population.N
        self.population = population
        self.syms = population.get_variables()

        # Get the latent type components
        from pyglm.components.latent import LatentType
        self.latent_types = []
        for latent_component in population.latent.latentlist:
                  adaptive_step_sz=True,
                  avg_accept_rate=self.avg_accept_rate)

            # Update the step size and average accept rate
            # (see the sketch after this method for the presumed full call)
            self.step_sz = new_step_sz
            self.avg_accept_rate = new_accept_rate

        # Update current L
        x['latent'][self.location.name]['L'] = L.reshape(self.L_shape)
        return x
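# The call truncated above presumably resembles the sketch below. This is a
# hypothetical reconstruction: the `hmc` helper, its argument order, and the
# (sample, step size, accept rate) triple it returns are all assumptions,
# not a confirmed API.
#
#     L[n, :], new_step_sz, new_accept_rate = \
#         hmc(lambda L_n: self._lp_L(L_n, x, n),       # log prob (assumed helper)
#             lambda L_n: self._grad_lp_L(L_n, x, n),  # gradient (assumed helper)
#             self.step_sz, self.n_steps, L[n, :],
#             adaptive_step_sz=True,
#             avg_accept_rate=self.avg_accept_rate)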
class _DiscreteLatentLocationUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample discrete latent locations on a grid.
    """
    def __init__(self, latent_location_component):
        self.location = latent_location_component

    def preprocess(self, population):
        self.N = population.N
        self.population = population
        self.syms = population.get_variables()
        self.L = self.location.Lmatrix

        # Compute the log probability and its gradients, taking into
        # account the prior and the likelihood of any consumers of the
        # location.
        self.log_p = self.location.log_p
            # Gibbs sample from the 2D distribution over grid cells
            ij = log_sum_exp_sample(lnp.ravel(order='C'))
            i, j = np.unravel_index(ij, (d1, d2), order='C')
            L[n, 0] = prior.min0 + i
            L[n, 1] = prior.min1 + j
        else:
            raise NotImplementedError('Only Categorical and JointCategorical '
                                      'location priors are supported')

        # Update current L
        if not self._check_bounds(L):
            raise RuntimeError('Sampled latent locations are out of bounds')

        x['latent'][self.location.name]['L'] = L.ravel()
        return x
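# `log_sum_exp_sample` is used above but not defined in these excerpts. The
# following is a minimal NumPy sketch of the standard technique (the library's
# own implementation may differ):
def log_sum_exp_sample(lnp):
    """Sample an index i with probability proportional to exp(lnp[i])."""
    lnp = lnp - np.max(lnp)   # subtract the max for numerical stability
    p = np.exp(lnp)
    p /= p.sum()              # normalize to a proper distribution
    return np.random.choice(len(p), p=p)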
class _DiscreteLocalGibbsLatentLocationUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample discrete latent locations on a grid.
    This is a Metropolis-Hastings update that proposes local steps with
    probability proportional to their relative posterior probability.
    """
    def __init__(self, latent_location_component):
        self.location = latent_location_component

    def preprocess(self, population):
        self.N = population.N
        self.population = population
        self.glm = self.population.glm
        self.syms = population.get_variables()
        self.L = self.location.Lmatrix
        self.Lflat = self.location.Lflat
                update = _DiscreteLocalGibbsLatentLocationUpdate(latent_component)
            else:
                update = _ContinuousLatentLocationUpdate(latent_component)

            update.preprocess(population)
            self.location_updates.append(update)

    def update(self, x):
        """
        Apply each location update in turn.
        """
        for update in self.location_updates:
            x = update.update(x)
        return x
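# In a Gibbs sampler these composite updates are swept repeatedly. A minimal
# usage sketch (`population`, the state dict `x`, and `n_samples` are assumed
# to come from the surrounding pyglm driver code):
#
#     import copy
#     update = LatentLocationUpdate()
#     update.preprocess(population)
#     samples = []
#     for _ in range(n_samples):
#         x = update.update(x)              # one sweep over all location updates
#         samples.append(copy.deepcopy(x))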
class _ContinuousLatentLocationUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample continuous latent locations.
    """
    def __init__(self, latent_location_component):
        self.location = latent_location_component

    def preprocess(self, population):
        self.syms = population.get_variables()

        # Get the shape of L
        # TODO: Fix this hack!
        self.L = self.location.L
        self.L_shape = population.sample()['latent'][self.location.name]['L'].shape

        # Compute the log probability and its gradients, taking into
        # account the prior and the likelihood of any consumers of the
        # location.
            # Metropolis-Hastings accept/reject step
            if np.log(np.random.rand()) < lp_prop - lp_curr:
                L[n, :] = L_prop
            else:
                L[n, :] = L_curr

        # Update current L
        if not self._check_bounds(L):
            raise RuntimeError('Sampled latent locations are out of bounds')

        x['latent'][self.location.name]['L'] = L.ravel()
        return x
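# `_check_bounds` is called throughout these updates but not shown in the
# excerpts. A minimal sketch of what it presumably verifies, assuming the
# prior exposes min0/min1 (seen above) and analogous max0/max1 attributes
# (the latter are assumptions):
#
#     def _check_bounds(self, L):
#         """Return True if every location lies inside the prior's grid box."""
#         prior = self.location.location_prior   # assumed attribute name
#         in0 = (L[:, 0] >= prior.min0) & (L[:, 0] <= prior.max0)
#         in1 = (L[:, 1] >= prior.min1) & (L[:, 1] <= prior.max1)
#         return np.all(in0 & in1)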
class _DiscreteGibbsLatentLocationUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample discrete latent locations on a grid.
    """
    def __init__(self, latent_location_component):
        self.location = latent_location_component

    def preprocess(self, population):
        self.N = population.N
        self.population = population
        self.syms = population.get_variables()
        self.L = self.location.Lmatrix

        # Compute the log probability and its gradients, taking into
        # account the prior and the likelihood of any consumers of the
        # location.
        self.log_p = self.location.log_p
    def target_variables(self):
        # Return a list of the variables that this update applies to
        return []

    def preprocess(self, population):
        """ Do any required preprocessing.
        """
        pass

    def update(self, x_curr):
        """ Take a MH step.
        """
        return x_curr
class ParallelMetropolisHastingsUpdate(MetropolisHastingsUpdate):
    """ Extending this class indicates that the updates can be
        performed in parallel over n, the index of the neuron.
    """
    def update(self, x_curr, n):
        """ Take a MH step for the n-th neuron. This can be performed in
            parallel over the other neurons n' in [N].
        """
        pass
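# A subclass's per-neuron update can be dispatched concurrently, since each
# column update is independent. A minimal sketch using the standard library;
# the result-merging policy is an assumption, not pyglm's actual parallel
# harness:
def parallel_sweep(update, x, N):
    """Apply a ParallelMetropolisHastingsUpdate to every neuron index."""
    from multiprocessing import Pool
    with Pool() as pool:
        # One updated state per column; the caller merges them as appropriate
        return pool.starmap(update.update, [(x, n) for n in range(N)])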
# class HmcGlmUpdate(ParallelMetropolisHastingsUpdate):
# """
# Update the continuous and unconstrained GLM parameters using Hamiltonian
# Monte Carlo. Stochastically follow the gradient of the parameters using
# Hamiltonian dynamics.
# """
# def __init__(self):
            # Metropolis-Hastings accept/reject step
            if np.log(np.random.rand()) < lnp_accept:
                L[n, :] = np.array(prop_loc)
            else:
                # Reject and stay in the current location
                L[n, :] = np.array(curr_loc)

        # Update current L
        if not self._check_bounds(L):
            raise RuntimeError('Sampled latent locations are out of bounds')

        x['latent'][self.location.name]['L'] = L.ravel()
        return x
class LatentTypeUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample the discrete latent types of the neurons.
    """
    def __init__(self):
        pass

    def preprocess(self, population):
        self.N = population.N
        self.population = population
        self.syms = population.get_variables()

        # Get the latent type components
        from pyglm.components.latent import LatentType
        self.latent_types = []
        for latent_component in population.latent.latentlist:
            if isinstance(latent_component, LatentType):