How to use the pymc.Normal function in pymc

To help you get started, we've selected a few pymc.Normal examples based on popular ways it is used in public projects.
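All of the excerpts below use the classic PyMC 2 interface, where Normal takes a mean mu and a precision tau = 1/sigma**2 (not a standard deviation). As a quick orientation, here is a minimal sketch of that calling convention; the variable names are ours, not from any project below:

import numpy as np
import pymc as mc

# Scalar prior: mean 0, precision 0.01 (i.e. standard deviation 10)
intercept = mc.Normal('intercept', mu=0., tau=0.01)

# Vector-valued prior with an explicit initial value
coeffs = mc.Normal('coeffs', mu=np.zeros(3), tau=1.e-2, value=np.zeros(3))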


github aflaxman / gbd / dismod3 / beta_binomial_model.py (view on GitHub)
    # set up age-specific rate function, if it does not yet exist
    if not rate_stoch:
        param_mesh = dm.get_param_age_mesh()

        if 'mu' in emp_prior:
            initial_value = emp_prior['mu']
        else:
            initial_value = dm.get_initial_value(key)

        # find the logit of the initial values, which is a little bit
        # of work because initial values are sampled from the est_mesh,
        # but the logit_initial_values are needed on the param_mesh
        logit_initial_value = mc.logit(
            interpolate(est_mesh, initial_value, param_mesh))
        
        logit_rate = mc.Normal('logit(%s)' % key,
                               mu=-5.*np.ones(len(param_mesh)),
                               tau=1.e-2,
                               value=logit_initial_value)
        #logit_rate = [mc.Normal('logit(%s)_%d' % (key, a), mu=-5., tau=1.e-2) for a in param_mesh]
        vars['logit_rate'] = logit_rate

        @mc.deterministic(name=key)
        def rate_stoch(logit_rate=logit_rate):
            return interpolate(param_mesh, mc.invlogit(logit_rate), est_mesh)

    if 'mu' in emp_prior:
        @mc.potential(name='empirical_prior_%s' % key)
        def emp_prior_potential(f=rate_stoch, mu=emp_prior['mu'], tau=1./np.array(emp_prior['se'])**2):
            return mc.normal_like(f, mu, tau)
        vars['empirical_prior'] = emp_prior_potential
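The @mc.potential at the end of this excerpt is how PyMC 2 attaches a soft constraint: it adds the log-probability returned by mc.normal_like directly to the joint density without creating a new random variable. A stripped-down version of the same pattern (names are ours):

import pymc as mc

f = mc.Normal('f', mu=0., tau=1.)

@mc.potential
def soft_constraint(f=f):
    # penalize f for straying from 0.5, with standard deviation 0.1
    return mc.normal_like(f, 0.5, 1. / 0.1**2)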
github aflaxman / gbd / old_src / dismod3 / bayesian_models / fit_disease_model.py (view on GitHub)
"""
    Generate a list of the PyMC variables for the generic disease
    model, and store it as dm.vars

    See comments in the code for exact details on the model.
    """
    dm.vars = {}
    out_age_mesh = range(probabilistic_utils.MAX_AGE)

    dm.vars['i'], i = initialized_rate_vars(dm.i_in())
    dm.vars['r'], r = initialized_rate_vars(dm.r_in())
    dm.vars['f'], f = initialized_rate_vars(dm.f_in())
    m = probabilistic_utils.mortality_for(dm, out_age_mesh)
    
    # TODO: make error in C_0 a semi-informative stochastic variable
    logit_C_0 = mc.Normal('logit(C_0)', 0., 1.e-2)
    @mc.deterministic
    def C_0(logit_C_0=logit_C_0):
        return mc.invlogit(logit_C_0)
    
    @mc.deterministic
    def S_0(C_0=C_0):
        return max(0.0, 1.0 - C_0)
    dm.vars['bins'] = [S_0, C_0, logit_C_0]
    
    # iterative solution to difference equations to obtain bin sizes for all ages
    @mc.deterministic
    def S_C_D_M(S_0=S_0, C_0=C_0, i=i, r=r, f=f, m=m):
        S = np.zeros(len(out_age_mesh))
        C = np.zeros(len(out_age_mesh))
        D = np.zeros(len(out_age_mesh))
        M = np.zeros(len(out_age_mesh))
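Both gbd excerpts so far follow the same pattern: place the Normal prior on the logit scale, where it is unconstrained, then map back to a probability with a deterministic node. Reduced to a self-contained sketch (names are ours):

import pymc as mc

logit_p = mc.Normal('logit_p', mu=0., tau=1.e-2)  # diffuse prior on the logit scale

@mc.deterministic
def p(logit_p=logit_p):
    return mc.invlogit(logit_p)  # maps back to (0, 1)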
github aflaxman / gbd / dismod3 / logit_normal_model.py (view on GitHub)
        sigma_alpha = .01

        beta = np.array(emp_prior['beta'])

        mu_gamma = np.array(emp_prior['gamma'])
        sigma_gamma = emp_prior['sigma']

        mu_sigma = .01
        conf_sigma = 1000.
    else:
        mu_alpha = np.zeros(len(X_region))
        sigma_alpha = 1.

        mu_beta = np.zeros(len(X_study))
        sigma_beta = .01
        beta = mc.Normal('study_coeffs_%s' % key, mu=mu_beta, tau=1/sigma_beta**2, value=mu_beta)
        vars.update(study_coeffs=beta)

        mu_gamma = -5.*np.ones(len(est_mesh))
        sigma_gamma = 1.

        mu_sigma = .1
        conf_sigma = 10.

    alpha = mc.Normal('region_coeffs_%s' % key, mu=mu_alpha, tau=1/sigma_alpha**2, value=mu_alpha)
    vars.update(region_coeffs=alpha)


    log_sigma = mc.Uninformative('log(dispersion_%s)' % key, value=np.log(mu_sigma))
    @mc.deterministic(name='dispersion_%s' % key)
    def sigma(log_sigma=log_sigma):
        return np.exp(log_sigma)
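The log_sigma construction at the end is the standard PyMC 2 trick for a positive parameter: sample an unconstrained stochastic on the log scale and exponentiate it in a deterministic, so Metropolis proposals never step out of bounds. A compact sketch (names are ours):

import numpy as np
import pymc as mc

log_sigma = mc.Uninformative('log_sigma', value=np.log(0.1))

@mc.deterministic
def sigma(log_sigma=log_sigma):
    return np.exp(log_sigma)  # always positive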
github pymc-devs / pymc3 / docs / guidecode / modelfitting.py (view on GitHub)
"""
   if all(self.stochastic.value != 0.):
       self.proposal_sd = ones(shape(self.stochastic.value)) * \
                           abs(self.stochastic.value) * scale
   else:
       self.proposal_sd = ones(shape(self.stochastic.value)) * scale
"""   



M.use_step_method(pm.AdaptiveMetropolis, [x,y,z], \
                      scales={x:1, y:2, z:.5}, delay=10000)



A = pm.Normal('A', value=numpy.zeros(100), mu=0., tau=1.)



A = [pm.Normal('A_%i' % i, value=0., mu=0., tau=1.) for i in range(100)]
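The two forms of A are interchangeable in a model, but the array-valued version is a single stochastic (one trace, one step method), whereas the list creates 100 separate scalar stochastics. A minimal sketch of sampling the array form with the standard PyMC 2 MCMC interface (the model container here is our own):

import numpy
import pymc as pm

A = pm.Normal('A', value=numpy.zeros(100), mu=0., tau=1.)
M = pm.MCMC({'A': A})
M.sample(iter=5000, burn=1000)
samples = M.trace('A')[:]  # shape (4000, 100)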
github pymc-devs / pymc3 / pymc / examples / gp / PyMCmodel.py (view on GitHub)
# Prior parameters of C
diff_degree = pm.Uniform('diff_degree', 1., 3)
amp = pm.Lognormal('amp', mu=.4, tau=1.)
scale = pm.Lognormal('scale', mu=.5, tau=1.)

# The covariance deterministic C is valued as a Covariance object.
@pm.deterministic
def C(eval_fun = gp.matern.euclidean, diff_degree=diff_degree, amp=amp, scale=scale):
    return gp.NearlyFullRankCovariance(eval_fun, diff_degree=diff_degree, amp=amp, scale=scale)


# Prior parameters of M
a = pm.Normal('a', mu=1., tau=1.)
b = pm.Normal('b', mu=.5, tau=1.)
c = pm.Normal('c', mu=2., tau=1.)

# The mean M is valued as a Mean object.
def linfun(x, a, b, c):
    # return a * x ** 2 + b * x + c
    return 0.*x + c
@pm.deterministic
def M(eval_fun = linfun, a=a, b=b, c=c):
    return gp.Mean(eval_fun, a=a, b=b, c=c)

# The GP submodel
fmesh = np.linspace(-np.pi/3.3,np.pi/3.3,4)
sm = gp.GPSubmodel('sm',M,C,fmesh)

# Observation precision
V = .0001
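The excerpt stops right after defining the observation precision V. In the full example file, the data are tied to the GP through another pm.Normal whose mean is the submodel's evaluation on the mesh; the sketch below reconstructs that step from the PyMC 2 GP API, so treat the value argument as a placeholder:

# Observed data, centered on the GP's value at the mesh points (sketch)
d = pm.Normal('d', mu=sm.f_eval, tau=1./V, value=np.zeros(len(fmesh)), observed=True)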
github armstrtw / pymc_radon / radon_varying_slope.py (view on GitHub)
for i, v in enumerate(counties_uniq):
        counties_dict[v] = i
    ans = np.empty(len(counties),dtype='int')
    for i in range(0,len(counties)):
        ans[i] = counties_dict[counties[i]]
    return ans

index_c = createCountyIndex(counties)

# Priors
mu_b = pymc.Normal('mu_b', mu=0., tau=0.0001)
sigma_b = pymc.Uniform('sigma_b', lower=0, upper=100)
tau_b = pymc.Lambda('tau_b', lambda s=sigma_b: s**-2)

a = pymc.Normal('a', mu=0., tau=0.0001)
b = pymc.Normal('b', mu=mu_b, tau=tau_b, value=np.zeros(len(set(counties))))

sigma_y = pymc.Uniform('sigma_y', lower=0, upper=100)
tau_y = pymc.Lambda('tau_y', lambda s=sigma_y: s**-2)

# Model
@pymc.deterministic(plot=False)
def y_hat(a=a,b=b):
    return a + b[index_c]*x

# Likelihood
@pymc.stochastic(observed=True)
def y_i(value=y, mu=y_hat, tau=tau_y):
    return pymc.normal_like(value,mu,tau)
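The hand-written @pymc.stochastic likelihood above is exactly equivalent to calling pymc.Normal directly with observed=True, which is the more common idiom; a sketch reusing the snippet's own names:

# Equivalent observed likelihood, without the decorator
y_i = pymc.Normal('y_i', mu=y_hat, tau=tau_y, value=y, observed=True)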
github astroML / astroML / book_figures / chapter10 / fig_matchedfilt_chirp.py (view on GitHub)
@pymc.deterministic
def beta(log_beta=log_beta):
    return np.exp(log_beta)


# uniform prior on log(omega)
@pymc.deterministic
def omega(log_omega=log_omega):
    return np.exp(log_omega)


@pymc.deterministic
def y_model(t=t, b0=b0, A=A, beta=beta, omega=omega):
    return chirp(t, b0, beta, A, omega)

y = pymc.Normal('y', mu=y_model, tau=sigma ** -2, observed=True, value=y_obs)

model = dict(b0=b0, A=A,
             log_beta=log_beta, beta=beta,
             log_omega=log_omega, omega=omega,
             y_model=y_model, y=y)


#----------------------------------------------------------------------
# Run the MCMC sampling (saving results to a pickle)
@pickle_results('matchedfilt_chirp.pkl')
def compute_MCMC_results(niter=20000, burn=2000):
    S = pymc.MCMC(model)
    S.sample(iter=niter, burn=burn)
    traces = [S.trace(s)[:] for s in ['b0', 'A', 'omega', 'beta']]

    M = pymc.MAP(model)
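The function is cut off just after constructing the MAP object. In PyMC 2 the standard next step is to call fit(), which optimizes every stochastic to the posterior mode; the continuation below is our assumption about the original's intent, not the project's verbatim code:

    M.fit()  # maximize the joint posterior (assumed next step)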
github aflaxman / gbd / book / age_pattern_mesh.py (view on GitHub)
### @export 'models-of-varying-smoothness'
in_mesh = dismod3.settings.gbd_ages
out_mesh = pl.arange(101)
data = pl.array([[10., 1, .25],
                 [50., 2.5, .25]])

scale = dict(Very=dismod3.utils.rho['very'],
             Moderately=dismod3.utils.rho['moderately'],
             Slightly=dismod3.utils.rho['slightly'])

for kind in ['linear', 'zero']:
    pl.figure(**book_graphics.quarter_page_params)
    for col, smoothness in enumerate(['Slightly', 'Moderately', 'Very']):

        ## setup lognormal prior on intercept
        gamma = mc.Normal('gamma', 0.,
                          2.**-2,
                          value=pl.log(data[:,1].mean()))
        mu = mc.Lambda('mu', lambda gamma=gamma: pl.exp(gamma))

        ## setup Gaussian Process prior on age pattern
        @mc.deterministic
        def M(mu=mu):
            return mc.gp.Mean(lambda x: mu*pl.ones(len(x)))
        C = mc.gp.FullRankCovariance(mc.gp.matern.euclidean,
                                     amp=data[:,1].max(),
                                     scale=scale[smoothness],
                                     diff_degree=2)
        sm = mc.gp.GPSubmodel('sm', M, C, in_mesh,
                              init_vals=mu.value*pl.ones_like(in_mesh))

        @mc.deterministic
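The excerpt breaks off mid-definition above. Separately, note the mc.Lambda one-liner near its top: for simple transforms it is a compact alternative to a full @mc.deterministic block. A minimal sketch of the same log-scale intercept, with our own names:

import numpy as np
import pymc as mc

log_mu = mc.Normal('log_mu', mu=0., tau=2.**-2)
mu = mc.Lambda('mu', lambda log_mu=log_mu: np.exp(log_mu))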

pymc: Probabilistic Programming in Python. Bayesian Modeling and Probabilistic Machine Learning with PyTensor (Apache-2.0).