How to use the pyglm.utils.theano_func_wrapper._flatten function in PyGLM

To help you get started, we’ve selected a few examples of pyglm.utils.theano_func_wrapper._flatten, based on popular ways it is used in public projects. All of the snippets below come from slinderman/theano_pyglm on GitHub.

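Before working through the snippets, it helps to know what this helper does: _flatten takes a nested dict/list structure of (symbolic) variables and returns its leaves as one flat list, which is what lets the examples below line symbols up with concrete values. The following is a minimal sketch of that behavior, not the library's actual implementation; traversal order in the real helper may differ.

def _flatten(struct):
    # Recursively collect the leaves of a nested dict/list into a flat list.
    if isinstance(struct, dict):
        return [leaf for value in struct.values() for leaf in _flatten(value)]
    if isinstance(struct, (list, tuple)):
        return [leaf for item in struct for leaf in _flatten(item)]
    return [struct]

# Example: a nested symbol dictionary becomes one flat list.
syms = {'glm': {'bias': ['b'], 'nlin': ['a', 'c']}, 'net': ['W']}
print(_flatten(syms))   # ['b', 'a', 'c', 'W']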

slinderman/theano_pyglm: pyglm/inference/coord_descent.py (view on GitHub)
def prep_first_order_glm_inference(population):
    """ Initialize functions that compute the gradient and Hessian of
        the log probability with respect to the differentiable GLM
        parameters, e.g. the weight matrix if it exists.
    """
    glm = population.glm
    syms = population.get_variables()

    # Compute gradients of the log prob wrt the GLM parameters
    glm_syms = differentiable(syms['glm'])

    print "Computing gradient of the prior w.r.t. the differentiable GLM parameters"
    g_glm_logprior, _ = grad_wrt_list(glm.log_prior, _flatten(glm_syms))

    print "Computing gradient of the GLM likelihood w.r.t. the differentiable GLM parameters"
    g_glm_ll, _ = grad_wrt_list(glm.ll, _flatten(glm_syms))

    # TODO: Replace this with a function that just gets the shapes?
    x0 = population.sample()
    nvars = population.extract_vars(x0, 0)
    dnvars = get_vars(glm_syms, nvars['glm'])
    _, glm_shapes = packdict(dnvars)

    # Private function to compute the log probability and its gradient
    # with respect to a set of parameters
    def nlp(x_glm_vec, x):
        """
        Helper function to compute the negative log posterior for a given set
        of GLM parameters. The parameters are passed in as a vector.
        """
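The prep function above leans on grad_wrt_list to differentiate a scalar log probability with respect to a flat list of symbolic variables. The toy sketch below shows the underlying Theano idea, assuming grad_wrt_list wraps theano.grad; the real helper's signature and return values may differ.

import theano
import theano.tensor as T

w = T.dvector('w')
b = T.dvector('b')
logp = -((w ** 2).sum() + (b ** 2).sum())   # toy scalar "log probability"

# theano.grad returns one gradient expression per variable in the list.
g_w, g_b = theano.grad(logp, [w, b])

# Stack them into a single flat gradient vector for a generic optimizer.
g_vec = T.concatenate([g_w.flatten(), g_b.flatten()])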
slinderman/theano_pyglm: pyglm/inference/gibbs.py (view on GitHub)
        # Get the likelihood of the GLM under A
        s = [self.network.graph.A] + \
             _flatten(self.syms['net']['weights']) + \
            [self.glm.n,
             self.glm.bias_model.I_bias,
             self.glm.bkgd_model.I_stim,
             self.glm.imp_model.I_imp] + \
            _flatten(self.syms['glm']['nlin'])

        xv = [A] + \
             _flatten(x['net']['weights']) + \
             [n_post,
              I_bias,
              I_stim,
              I_imp] + \
            _flatten(x['glms'][n_post]['nlin'])

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += self.glm.ll.eval(dict(zip(s, xv)))

        return lp
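Note how the snippet pairs the flat symbol list s with the flat value list xv and evaluates the likelihood via a substitution dict; because _flatten visits the symbols and the values in the same order, zip(s, xv) keeps them aligned. A toy version of the same eval pattern, assuming Theano is installed:

import numpy as np
import theano.tensor as T

a = T.dscalar('a')
b = T.dvector('b')
expr = a * b.sum()

s = [a, b]                    # flattened symbolic variables
xv = [2.0, np.ones(3)]        # flattened concrete values, same order
print(expr.eval(dict(zip(s, xv))))   # prints 6.0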
slinderman/theano_pyglm: pyglm/inference/coord_descent.py (view on GitHub)
def prep_first_order_network_inference(population):
    """ Initialize functions that compute the gradient and Hessian of
        the log probability with respect to the differentiable GLM
        parameters, e.g. the weight matrix if it exists.
    """
    network = population.network
    syms = population.get_variables()

    # Compute gradients of the log prob wrt the GLM parameters
    network_syms = differentiable(syms['net'])

    print "Computing gradient of the network prior w.r.t. the differentiable GLM parameters"
    g_network_logprior, _ = grad_wrt_list(network.log_p, _flatten(network_syms))

    # TODO: Replace this with a function that just gets the shapes?
    x0 = population.sample()
    nvars = population.extract_vars(x0, 0)
    dnvars = get_vars(network_syms, nvars['net'])
    _, network_shapes = packdict(dnvars)

    # Private function to compute the log probability and its gradient
    # with respect to a set of parameters
    def nlp(x_network_vec, x):
        """
        Helper function to compute the negative log posterior for a given set
        of GLM parameters. The parameters are passed in as a vector.
        """
        x_network = unpackdict(x_network_vec, network_shapes)
        set_vars(network_syms, x['net'], x_network)
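The nlp helpers above rely on packdict and unpackdict to move between a dict of named parameter arrays and the single flat vector a generic optimizer expects. The stand-ins below illustrate the round trip; they are illustrative only, and the library's actual signatures may differ.

import numpy as np

def packdict(d):
    # Flatten a dict of arrays into one vector, recording each shape.
    shapes = {k: np.shape(v) for k, v in d.items()}
    vec = np.concatenate([np.ravel(d[k]) for k in d])
    return vec, shapes

def unpackdict(vec, shapes):
    # Rebuild the dict of arrays from the flat vector.
    out, offset = {}, 0
    for k, shp in shapes.items():
        size = int(np.prod(shp, dtype=int))
        out[k] = np.reshape(vec[offset:offset + size], shp)
        offset += size
    return out

params = {'w': np.ones((2, 2)), 'b': np.zeros(3)}
vec, shapes = packdict(params)
assert np.allclose(unpackdict(vec, shapes)['w'], params['w'])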
slinderman/theano_pyglm: pyglm/inference/gibbs.py (view on GitHub)
    def __init__(self, population):
        """ Initialize functions that compute the gradient and Hessian of
            the log probability with respect to the differentiable GLM
            parameters, e.g. the weight matrix if it exists.
        """
        self.population = population
        self.glm = population.glm
        self.bias_model = self.glm.bias_model
        self.syms = population.get_variables()
        self.bias_syms = differentiable(self.syms['glm']['bias'])

        # Compute gradients of the log prob wrt the GLM parameters
        self.glm_logp = self.glm.log_p
        # self.g_glm_logp_wrt_bias, _ = grad_wrt_list(self.glm_logp,
        #                                            _flatten(self.bias_syms))

        self.g_glm_ll_wrt_bias, _ = grad_wrt_list(self.glm.ll,
                                                   _flatten(self.bias_syms))

        self.g_bias_logp_wrt_bias, _ = grad_wrt_list(self.bias_model.log_p,
                                                   _flatten(self.bias_syms))


        # Get the shape of the parameters from a sample of variables
        self.glm_shapes = get_shapes(
            self.population.extract_vars(self.population.sample(), 0)['glm']['bias'],
            self.bias_syms)
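get_shapes plays the same role for the Gibbs updates: it records, for each differentiable symbol, the shape of the corresponding sampled value so a flat proposal vector can be unpacked later. A hypothetical stand-in, assuming the sampled values and the symbols are parallel dicts keyed by parameter name:

import numpy as np

def get_shapes(values, syms):
    # Shape of each sampled value, keyed like the symbol it pairs with.
    return {name: np.shape(values[name]) for name in syms}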
slinderman/theano_pyglm: pyglm/inference/gibbs.py (view on GitHub)
    def __init__(self, population):
        """ Initialize functions that compute the gradient and Hessian of
            the log probability with respect to the differentiable GLM
            parameters, e.g. the weight matrix if it exists.
        """
        self.population = population
        self.glm = population.glm
        self.syms = population.get_variables()
        self.bkgd_syms = differentiable(self.syms['glm']['bkgd'])

        # Compute gradients of the log prob wrt the GLM parameters
        self.glm_logprior = self.glm.log_prior
        self.g_glm_logprior_wrt_bkgd, _ = grad_wrt_list(self.glm_logprior,
                                                   _flatten(self.bkgd_syms))

        self.glm_ll = self.glm.ll
        self.g_glm_ll_wrt_bkgd, _ = grad_wrt_list(self.glm_ll,
                                                   _flatten(self.bkgd_syms))

        # Get the shape of the parameters from a sample of variables
        self.glm_shapes = get_shapes(
            self.population.extract_vars(self.population.sample(), 0)['glm']['bkgd'],
            self.bkgd_syms)
slinderman/theano_pyglm: pyglm/inference/gibbs.py (view on GitHub)
        # Gradient of the network log prior with respect to W
        g_lp = seval(self.g_netlp_wrt_W,
                     self.syms['net'],
                     x['net'])

        # Inputs for the gradient of the GLM likelihood under W
        s = _flatten(self.syms['net']['graph']) + \
            [self.network.weights.W_flat,
             self.glm.n,
             self.glm.bias_model.I_bias,
             self.glm.bkgd_model.I_stim,
             self.glm.imp_model.I_imp] + \
            _flatten(self.syms['glm']['nlin'])

        xv = _flatten(x['net']['graph']) + \
             [W,
              n_post,
              I_bias,
              I_stim,
              I_imp] + \
             _flatten(x['glms'][n_post]['nlin'])

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            g_lp += seval(self.g_glmll_wrt_W,
                          dict(zip(range(len(s)), s)),
                          dict(zip(range(len(xv)), xv)))

        # Ignore gradients wrt columns other than n_post
        g_mask = np.zeros((self.N, self.N))
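The update ends by masking the summed gradient so that only the entries for column n_post survive; a toy illustration of that masking step, assuming g_lp has shape (N, N):

import numpy as np

N, n_post = 4, 2
g_lp = np.random.randn(N, N)    # stand-in for the accumulated gradient

g_mask = np.zeros((N, N))
g_mask[:, n_post] = 1.0         # keep only column n_post
g_lp = g_lp * g_mask            # gradients wrt other columns are zeroed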
slinderman/theano_pyglm: pyglm/inference/gibbs.py (view on GitHub)
def _grad_lp_W(self, W, x, n_post, I_bias, I_stim, I_imp):
        """ Compute the log probability for a given column W[:,n_post]
        """
        # Set W in state dict x
        set_vars('W', x['net']['weights'], W)

        # Gradient of the network log prior with respect to W
        g_lp = seval(self.g_netlp_wrt_W,
                     self.syms['net'],
                     x['net'])

        # Inputs for the gradient of the GLM likelihood under W
        s = _flatten(self.syms['net']['graph']) + \
            [self.network.weights.W_flat,
             self.glm.n,
             self.glm.bias_model.I_bias,
             self.glm.bkgd_model.I_stim,
             self.glm.imp_model.I_imp] + \
            _flatten(self.syms['glm']['nlin'])

        xv = _flatten(x['net']['graph']) + \
             [W,
              n_post,
              I_bias,
              I_stim,
              I_imp] + \
             _flatten(x['glms'][n_post]['nlin'])

        # Compute the log likelihood for each data sequence
slinderman/theano_pyglm: pyglm/inference/gibbs.py (view on GitHub)
    def __init__(self, population):
        """ Initialize functions that compute the gradient and Hessian of
            the log probability with respect to the differentiable GLM
            parameters, e.g. the weight matrix if it exists.
        """
        self.population = population
        self.glm = population.glm
        self.syms = population.get_variables()
        self.impulse_syms = differentiable(self.syms['glm']['imp'])

        # Compute gradients of the log prob wrt the GLM parameters
        self.glm_logprior = self.glm.log_prior
        self.g_glm_logprior_wrt_imp, _ = grad_wrt_list(self.glm_logprior,
                                                   _flatten(self.impulse_syms))

        self.glm_ll = self.glm.ll
        self.g_glm_ll_wrt_imp, _ = grad_wrt_list(self.glm_ll,
                                                   _flatten(self.impulse_syms))

        # Get the shape of the parameters from a sample of variables
        self.glm_shapes = get_shapes(
            self.population.extract_vars(self.population.sample(), 0)['glm']['imp'],
            self.impulse_syms)
slinderman/theano_pyglm: pyglm/inference/gibbs.py (view on GitHub)
        # Prior probability of the network parameters
        lp = seval(self.network.log_p,
                   self.syms['net'],
                   x['net'])

        # Get the likelihood of the GLM under W
        s = _flatten(self.syms['net']['graph']) + \
            [self.network.weights.W_flat,
             self.glm.n,
             self.glm.bias_model.I_bias,
             self.glm.bkgd_model.I_stim,
             self.glm.imp_model.I_imp] + \
            _flatten(self.syms['glm']['nlin'])

        xv = _flatten(x['net']['graph']) + \
             [W,
              n_post,
              I_bias,
              I_stim,
              I_imp] + \
             _flatten(x['glms'][n_post]['nlin'])

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += self.glm.ll.eval(dict(zip(s, xv)))

        return lp