How to use the pymc.gp module in pymc

To help you get started, we've selected a few pymc.gp examples, based on popular ways the module is used in public projects.
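
Most of the excerpts below follow the same pymc 2.x pattern: build a Mean and a Covariance, condition them in place with gp.observe, and then evaluate or draw Realization objects from the conditioned prior. As a warm-up, here is a minimal, self-contained sketch of that workflow; the constant mean function and all hyperparameter values are illustrative assumptions, not taken from any of the projects below.

import numpy as np
from pymc import gp

# A mean function is any callable of an array of input points.
def const_mean(x, c=0.):
    return c * np.ones(len(x))

M = gp.Mean(const_mean, c=0.)

# Matern covariance over Euclidean distance: amp sets the prior
# standard deviation, scale the correlation length, and diff_degree
# the smoothness of realizations.
C = gp.Covariance(gp.matern.euclidean, amp=1., scale=50., diff_degree=2.)

# Condition M and C in place on three observations (omitting the
# observation variance treats them as exact).
gp.observe(M, C, [0., 30., 100.], [-5., -3., -5.])

# Draw one function from the conditioned GP and evaluate it on a grid.
f = gp.Realization(M, C)
x = np.linspace(0., 100., 101)
print(f(x))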


github aflaxman / gbd / tests / run_similarity_validation.py (view on GitHub)
# Excerpt from the script's option-handling branch: read simulation
# options, put a GP prior on the log of the true rate function, and
# run the validation for three heterogeneity settings.
N = int(options.numberofrows)
delta_true = float(options.delta)
replicate = int(options.replicate)
bias = float(options.bias)
sigma_prior = float(options.sigma)

print('Running random effects validation for:')
print('N', N)
print('delta_true', delta_true)
print('bias', bias)
print('sigma_prior', sigma_prior)
print('replicate', replicate)

# Quadratic mean, Matern covariance, conditioned on three log-rate values.
M = gp.Mean(validate_similarity.quadratic)
C = gp.Covariance(gp.matern.euclidean, amp=1., diff_degree=2, scale=50)
gp.observe(M, C, [0, 30, 100], [-5, -3, -5])

true = {}
lp = gp.Realization(M, C)
true_p = lambda x: pl.exp(lp(x))  # the realization lives on the log scale

model = validate_similarity.generate_data(N, delta_true, true_p, 'Unusable', bias, sigma_prior)
for het in 'Very Moderately Slightly'.split():
    model.parameters['p']['heterogeneity'] = het
    validate_similarity.fit(model)
    model.results.to_csv('%s/%s/%s-%s-%s-%s-%s-%s.csv' % (output_dir, validation_name, options.numberofrows, options.delta, het, bias, sigma_prior, options.replicate))

github aflaxman / gbd / dismod3 / gp_logit_model.py (view on GitHub)
# Excerpt from within a loop over data points: clamp the logit of the
# data value away from 0 and 1, convert the standard error to a
# logit-scale variance via the delta method, and feed both to the GP.
if d_val == 0.:
    logit_val = -10.
elif d_val == 1.:
    logit_val = 10.
else:
    logit_val = mc.logit(d_val)

d_se = dm.se_per_1(d)
if d_se == MISSING:
    logit_V = 10.  # TODO: make this a function of the max of other variables
elif d_se == 0.:
    logit_V = .1
else:
    logit_V = (1/d_val + 1/(1 - d_val))**2 * d_se**2

gp.observe(M, C, [[a0, a1]], [logit_val], [logit_V])

a2 = .5 * (a0 + a1 + 1)
gp.observe(M, C, [[a2, a2]], [logit_val], [logit_V])

prior_str = dm.get_priors(key)
for p1 in prior_str.split(','):
    p = p1.split()
    if len(p) > 0 and p[0] == 'zero':
        # A 'zero a0 a1' prior pins the logit rate near -10 (i.e. a
        # rate of essentially zero) at every age in [a0, a1].
        a0 = int(p[1])
        a1 = int(p[2])
        gp.observe(M, C, [[a, a] for a in range(a0, a1 + 1)],
                   [-10. for a in range(a0, a1 + 1)],
                   [1. for a in range(a0, a1 + 1)])

github aflaxman / gbd / old_src / dismod3 / probabilistic_utils.py (view on GitHub)
def uninformative_prior_gp(c=-10., diff_degree=2., amp=100., scale=200.):
    """
    Return mean and covariance objects for an uninformative prior on
    the age-specific rate.
    """
    M = gp.Mean(const_func, c=c)
    C = gp.Covariance(gp.matern.euclidean, diff_degree=diff_degree,
                      amp=amp, scale=scale)

    return M, C
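
A hypothetical usage sketch for the function above, assuming (as elsewhere in dismod) that the GP lives on the log-rate scale; the observation points, values, and variances here are made up for illustration:

M, C = uninformative_prior_gp(c=-10., diff_degree=2., amp=100., scale=200.)
gp.observe(M, C, [0, 50, 100], [-10., -8., -10.], [1., 1., 1.])  # log rates
f = gp.Realization(M, C)
ages = np.arange(0, 101)
rate = np.exp(f(ages))  # back to the natural scale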

github malaria-atlas-project / geo-gpu / geo_gpu / Covariance.py (view on GitHub)
from cholesky import *
from cov_funs import *
from distances import *
from common import *
import pycuda as cuda
import pymc
import numpy as np
import warnings


# TODO: Observations!
# TODO: Nuggets in Cholesky decomps
# TODO: Diagonal calls
class CudaCovariance(pymc.gp.Covariance):
    """A class mirroring pymc.gp.Covariance, but with all computations done on a GPU."""
    def __init__(self, distance, covariance):
        self.distance = distance
        self.covariance = covariance
        self.ndim = None
        self.observed = False

        if self.distance.dtype != self.covariance.dtype:
            raise ValueError('Distance function has dtype %s, but covariance function has dtype %s.'
                             % (self.distance.dtype, self.covariance.dtype))

        self.dtype = self.covariance.dtype

    def gpu_call(self, x, y, symm=False):
        # Remember the shape of x, then 'regularize' it.
        orig_shape = np.shape(x)

github aflaxman / gbd / book / age_patterns.py (view on GitHub)
# Matern scale parameter for each smoothness setting.
scale = dict(Very=dismod3.utils.rho['very'],
             Moderately=dismod3.utils.rho['moderately'],
             Slightly=dismod3.utils.rho['slightly'])

for col, smoothness in enumerate(['Slightly', 'Moderately', 'Very']):

    ## setup lognormal prior on intercept
    gamma = mc.Normal('gamma', 0.,
                      2.**-2,
                      value=pl.log(data[:,1].mean()))
    mu = mc.Lambda('mu', lambda gamma=gamma: pl.exp(gamma))

    ## setup Gaussian Process prior on age pattern
    @mc.deterministic
    def M(mu=mu):
        return mc.gp.Mean(lambda x: mu*pl.ones(len(x)))
    C = mc.gp.FullRankCovariance(mc.gp.matern.euclidean,
                                 amp=data[:,1].max(),
                                 scale=scale[smoothness],
                                 diff_degree=2)
    sm = mc.gp.GPSubmodel('sm', M, C, mesh,
                          init_vals=mu.value*pl.ones_like(mesh))

    ## condition on rate being positive
    @mc.potential
    def positive(f=sm.f_eval):
        if pl.any(f < 0.):
            return -pl.inf
        else:
            return 0.

    ## likelihood of observed data, using normal model for simplicity
    @mc.deterministic

github pymc-devs / pymc3 / pymc / gp / GPutils.py (view on GitHub)
"""
    Evaluates M(x) and C(x).
    
    Minimizes computation; evaluating M(x) and C(x) separately would
    evaluate the off-diagonal covariance term twice, but callling
    point_eval(M,C,x) would only evaluate it once.
    
    Also chunks the evaluations if the off-diagonal term.
    """
    
    x_ = regularize_array(x)
    
    M_out = empty(x_.shape[0])
    V_out = empty(x_.shape[0])
    
    if isinstance(C, pymc.gp.BasisCovariance):
        y_size = len(C.basis)
    elif C.obs_mesh is not None:
        y_size = C.obs_mesh.shape[0]
    else:
        y_size = 1
    
    n_chunks = ceil(y_size * x_.shape[0] / float(chunksize))
    bounds = array(linspace(0, x_.shape[0], n_chunks + 1), dtype='int')
    for (cmin, cmax) in zip(bounds[:-1], bounds[1:]):
        x__ = x_[cmin:cmax] 
        V_out[cmin:cmax], Uo_Cxo = C(x__, regularize=False, return_Uo_Cxo=True)
        M_out[cmin:cmax] = M(x__, regularize=False, Uo_Cxo=Uo_Cxo)

    if len(x.shape) > 1:
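
A plausible call, assuming M and C have already been conditioned with gp.observe; as the body above suggests, point_eval returns the pointwise means and variances without ever forming the full covariance matrix over x:

x = np.linspace(0., 100., 500)
m, v = point_eval(M, C, x)  # mean and variance at each point of x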

github aflaxman / gbd / old_src / dismod3 / models / population.py (view on GitHub)
def gaussian_process(self):
        """
        return a PyMC Gaussian Process mean and covariance to interpolate
        the population-by-age mesh/value data
        """
        from pymc import gp
        from dismod3.bayesian_models import probabilistic_utils

        M, C = probabilistic_utils.uninformative_prior_gp(c=0.,  diff_degree=2., amp=10., scale=200.)
        gp.observe(M, C, self.data['mesh'], self.data['vals'], 0.0)
    
        return M, C
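
A sketch of how the returned objects might be consumed; pop is a hypothetical instance of this class, and the age grid is illustrative. Calling a Covariance with a single argument returns pointwise variances, as the point_eval excerpt above relies on:

M, C = pop.gaussian_process()
ages = np.arange(0, 101)
mean_pop = M(ages)  # interpolated population at each age
var_pop = C(ages)   # pointwise posterior variance at each age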

github pymc-devs / pymc3 / pymc / examples / gp / PyMCmodel.py (view on GitHub)
# Prior parameters of M
a = pm.Normal('a', mu=1., tau=1.)
b = pm.Normal('b', mu=.5, tau=1.)
c = pm.Normal('c', mu=2., tau=1.)

# The mean M is valued as a Mean object.
def linfun(x, a, b, c):
    # return a * x ** 2 + b * x + c
    return 0.*x + c
@pm.deterministic
def M(eval_fun = linfun, a=a, b=b, c=c):
    return gp.Mean(eval_fun, a=a, b=b, c=c)

# The GP submodel. C, the covariance, is defined earlier in the full
# example (not shown in this excerpt).
fmesh = np.linspace(-np.pi / 3.3, np.pi / 3.3, 4)
sm = gp.GPSubmodel('sm', M, C, fmesh)

# Observation precision
V = .0001

# The data d is just array-valued. It's normally distributed about GP.f(obs_x).
init_val = np.random.normal(size=len(fmesh))
d = pm.Normal('d',mu=sm.f_eval, tau=1./V, value=init_val, observed=True)
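
One plausible way to sample this model with pymc 2.x (a sketch, not the example's actual driver script): wrap the variables in an MCMC object and give the GP evaluations their dedicated Gibbs step method.

m = pm.MCMC([a, b, c, M, sm, d])
# pymc.gp provides a Gibbs step method for GP evaluations observed
# with normal noise; V is the observation variance defined above.
m.use_step_method(gp.GPEvaluationGibbs, sm, V, d)
m.sample(iter=5000, burn=1000)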

github aflaxman / gbd / space_time_model / model.py (view on GitHub)
# Excerpt: build one Matern covariance matrix per region i as a
# deterministic node, so it tracks the hyperparameters sigma_f, tau_f,
# and diff_degree during sampling.
@mc.deterministic(name='C_%d' % i)
def C_i(i=i, grid=grid, sigma_f=sigma_f, tau_f=tau_f, diff_degree=diff_degree):
    return gp.matern.euclidean(grid, grid, amp=sigma_f[i], scale=tau_f[i], diff_degree=diff_degree[i])
C.append(C_i)

pymc

Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with PyTensor

License: Apache-2.0