How to use the pymc.Model class in pymc

To help you get started, we've selected a few pymc.Model examples, drawn from popular ways the class is used in public projects. Note that the excerpts span two generations of the library: the legacy PyMC2 interface, where pymc.Model(...) collects already-built nodes and pymc.MCMC drives sampling, and the PyMC3-and-later interface, where Model is a context manager that registers variables as they are defined.

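For orientation, here is a minimal sketch of the current (PyMC >= 4) context-manager style; the variable names and data are illustrative and not taken from any example below.

import numpy as np
import pymc as pm

# Toy data; purely illustrative.
data = np.array([0.1, -0.3, 0.2, 0.5])

with pm.Model() as model:
    mu = pm.Normal("mu", mu=0.0, sigma=1.0)                   # prior on the mean
    obs = pm.Normal("obs", mu=mu, sigma=1.0, observed=data)   # likelihood
    idata = pm.sample(1000)                                   # NUTS by default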

github pymc-devs / pymc3 / pymc / sandbox / test_model_ave.py View on Github external
def test(self):

        # Changepoint model
        M1 = Model(model_1)

        # Constant rate model
        M2 = Model(model_2)

        # Exponentially varying rate model
        M3 = Model(model_3)

        # print 'Docstring of model 1:'
        # print model_1.__doc__
        # print 'Docstring of model 2:'
        # print model_2.__doc__
        # print 'Docstring of model 3:'
        # print model_3.__doc__


        posterior = weight([M1, M2, M3], 10000)[0]

        # print 'Log posterior probability of changepoint model: ',log(posterior[M1])
        # print 'Log posterior probability of constant rate model: ',log(posterior[M2])
github pymc-devs / pymc3 / tests / models.py View on Github external
def simple_2model():
    mu = -2.1
    tau = 1.3
    p = .4
    with Model() as model:
        x = pm.Normal('x', mu, tau, testval=.1)
        y = pm.Bernoulli('y', p)

    return model.test_point, model
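In this PyMC3-era excerpt, testval sets a variable's initial value and model.test_point is the dict of initial values. In PyMC >= 4 the same ideas are spelled initval and model.initial_point(); here is a rough sketch of the equivalent, not taken from the repository:

import pymc as pm

def simple_2model_modern():
    # Same model under PyMC >= 4 naming (testval -> initval,
    # model.test_point -> model.initial_point()).
    with pm.Model() as model:
        x = pm.Normal("x", mu=-2.1, tau=1.3, initval=0.1)
        y = pm.Bernoulli("y", p=0.4)
    return model.initial_point(), model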
github akelleh / causality / causality / inference / independence_tests / __init__.py View on Github external
        @pymc.stochastic(name='joint_sample')
        def ci_joint(value=self.mcmc_initialization):
            def logp(value):
                xi = [value[i] for i in range(len(x))]
                yi = [value[i+len(x)] for i in range(len(y))]
                zi = [value[i+len(x)+len(y)] for i in range(len(z))] 
                if len(z) == 0:
                    log_px_given_z = np.log(self.densities[0].pdf(data_predict=xi))
                    log_py_given_z = np.log(self.densities[1].pdf(data_predict=yi))
                    log_pz = 0.
                else:
                    log_px_given_z = np.log(self.densities[0].pdf(endog_predict=xi, exog_predict=zi))
                    log_py_given_z = np.log(self.densities[1].pdf(endog_predict=yi, exog_predict=zi))
                    log_pz = np.log(self.densities[2].pdf(data_predict=zi))
                return log_px_given_z + log_py_given_z + log_pz
        model = pymc.Model([ci_joint])
        mcmc = pymc.MCMC(model)
        burn = self.burn
        thin = self.thin
        samples = self.N
        iterations = samples * thin + burn
        mcmc.sample(iter=iterations, burn=burn, thin=thin)
        return pd.DataFrame(mcmc.trace('joint_sample')[:], columns=x+y+z)
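This is the legacy PyMC2 pattern: @pymc.stochastic turns the decorated function into a Stochastic whose log-probability is the inner logp function, pymc.Model([...]) collects the nodes, and pymc.MCMC runs the sampler. The run-length arithmetic is worth noting: with iterations = samples * thin + burn, exactly (iterations - burn) / thin = samples draws survive burn-in and thinning.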
github yew1eb / machine-learning / other_code / mixture_model / gmm.py View on Github external
    mu_k = pm.Normal('mu_k', np.ones((K, n_features)) * mu_0, tau_k,
                     value=np.ones((K, n_features)) * mu_0)
    phi_0 = pm.Dirichlet('phi_0', theta=np.ones(K) * alpha)

    @pm.deterministic(dtype=float)
    def phi(value=np.ones(K) / K, phi_0=phi_0):
        val = np.hstack((phi_0, (1 - np.sum(phi_0))))
        return val

    z = pm.Categorical(
        'z', p=phi, value=pm.rcategorical(np.ones(K) / K, size=n_samples))

    # observed variables
    x = pm.Normal('x', mu=mu_k[z], tau=tau_k[z], value=data, observed=True)

    return pm.Model([mu_k, tau_k, phi_0, phi, z, x])
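A design note on the phi deterministic: PyMC2's Dirichlet stores only the first K-1 components of the simplex, so phi appends the remaining mass 1 - sum(phi_0) to recover a full length-K probability vector for the Categorical assignments (the same job PyMC2's CompletedDirichlet performs).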
github aflaxman / gbd / data.py View on Github external
def describe_vars(d):
    # (Excerpt assumes the module-level imports: import pymc as mc,
    #  import pandas, and import pylab as pl.)
    m = mc.Model(d)

    df = pandas.DataFrame(columns=['type', 'value', 'logp'],
                          index=[n.__name__ for n in m.nodes],
                          dtype=object)
    for n in m.nodes:
        k = n.__name__
        # .ix has been removed from modern pandas; .loc is the
        # label-based equivalent.
        df.loc[k, 'type'] = type(n).__name__

        if hasattr(n, 'value'):
            rav = pl.ravel(n.value)
            if len(rav) == 1:
                df.loc[k, 'value'] = n.value
            elif len(rav) > 1:
                df.loc[k, 'value'] = '%.1f, ...' % rav[0]

        df.loc[k, 'logp'] = getattr(n, 'logp', pl.nan)
    return df  # the excerpt is truncated here; returning the frame is the natural completion
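Here mc.Model(d) is PyMC2's generic constructor: it accepts a container of nodes (a dict, list, or module namespace), and m.nodes is the resulting set of Stochastic, Deterministic, and Potential objects, which is what makes this kind of generic introspection possible.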
github chrisgorgo / alleninf / alleninf / analysis.py View on Github external
def bayesian_random_effects(data, labels, group, n_samples=2000, n_burnin=500):
    import pymc as pm
    # Prepare the data: map each donor to an integer index.
    donors = data[group].unique()
    donors_lookup = dict(zip(donors, range(len(donors))))
    data['donor_code'] = data[group].replace(donors_lookup).values
    n_donors = len(donors)
    donor_idx = data['donor_code'].values

    # Set up the hierarchical model.
    with pm.Model() as hierarchical_model:
        # Hyperpriors for group nodes.  (Note: the "(variance)" variables
        # are passed to `sd=` below, so they act as standard deviations.)
        group_intercept_mean = pm.Normal('group intercept (mean)', mu=0., sd=100**2)
        group_intercept_variance = pm.Uniform('group intercept (variance)', lower=0, upper=100)
        group_slope_mean = pm.Normal('group slope (mean)', mu=0., sd=100**2)
        group_slope_variance = pm.Uniform('group slope (variance)', lower=0, upper=100)

        individual_intercepts = pm.Normal('individual intercepts', mu=group_intercept_mean, sd=group_intercept_variance, shape=n_donors)
        individual_slopes = pm.Normal('individual slopes', mu=group_slope_mean, sd=group_slope_variance, shape=n_donors)

        # Model error
        residuals = pm.Uniform('residuals', lower=0, upper=100)

        expression_est = individual_slopes[donor_idx] * data[labels[0]].values + individual_intercepts[donor_idx]

        # Data likelihood
        expression_like = pm.Normal('expression_like', mu=expression_est, sd=residuals, observed=data[labels[1]])
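Given such a model, inference would typically proceed inside the same context. The excerpt stops before that step; the following is a sketch only, assuming n_samples maps to posterior draws and n_burnin to tuning steps (the original file may differ):

    with hierarchical_model:
        trace = pm.sample(draws=n_samples, tune=n_burnin)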
github mdekauwe / FitFarquharModel / fit_farquhar_model / fit_MCMC.py View on Github external
Rd = pymc.Uniform("Rd", 0.0, 3.0, value=0.5)

F = FarquharC3(peaked_Jmax=True, peaked_Vcmax=True)

@pymc.deterministic
def farquhar_wrapper(df=df, Vcmax=Vcmax, Jmax=Jmax, Rd=Rd):
    (An, Anc, Anj) = F.calc_photosynthesis(Ci=df["Ci"], Tleaf=df["Tleaf"],
                                           Jmax=Jmax, Vcmax=Vcmax, Rd=Rd)
    return An


y = pymc.Normal('y', mu=farquhar_wrapper, tau=1.0/obs_sigma**2,
                value=obs, observed=True)

N = 100000
model = pymc.Model([y, df, Vcmax, Jmax, Rd])
M = pymc.MCMC(model)
M.sample(iter=N, burn=int(N * 0.1), thin=10)  # burn must be an integer count

Vcmax = M.stats()["Vcmax"]['mean']
Jmax = M.stats()["Jmax"]['mean']
Rd = M.stats()["Rd"]['mean']
(An, Anc, Anj) = F.calc_photosynthesis(Ci=df["Ci"], Tleaf=df["Tleaf"],
                                       Jmax=Jmax, Vcmax=Vcmax, Rd=Rd)
rmse = np.sqrt(((obs - An)**2).mean(0))
print "RMSE: %.4f" % (rmse)

# Get the fits
Vcmax = M.trace('Vcmax').gettrace()
Jmax = M.trace('Jmax').gettrace()
Rd = M.trace('Rd').gettrace()
for v in ["Vcmax", "Jmax", "Rd"]:
    ...  # the excerpt is truncated here
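Two PyMC2 idioms appear here: M.stats() returns per-variable summary statistics (such as the posterior means used for the RMSE check), and M.trace('name').gettrace() returns the retained samples as an array, so the point estimates and the full traces come from the same run.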
github mauricevb80 / Probabilistic-Programming-and-Bayesian-Methods-for-Hackers / ExamplesFromChapters / Chapter2 / FreqOfCheaters.py View on Github external
import pymc as pm

p = pm.Uniform("freq_cheating", 0, 1)


@pm.deterministic
def p_skewed(p=p):
    return 0.5 * p + 0.25

yes_responses = pm.Binomial(
    "number_cheaters", 100, p_skewed, value=35, observed=True)

model = pm.Model([yes_responses, p_skewed, p])

# To Be Explained in Chapter 3!
mcmc = pm.MCMC(model)
mcmc.sample(50000, 25000)
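The p_skewed deterministic encodes the book's randomized-response ("privacy algorithm") scheme: half of the respondents answer truthfully, contributing 0.5 * p to the "yes" probability, and the other half answer a second coin flip, contributing a constant 0.25. The observed Binomial count of 35 "yes" answers out of 100 then constrains the latent cheating frequency p.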
github strawlab / best / best / __init__.py View on Github external
    @deterministic(plot=False)
    def nu(n=nu_minus_one):
        out = n + 1
        return out

    @deterministic(plot=False)
    def lam1(s=group1_std):
        out = 1/s**2
        return out
    @deterministic(plot=False)
    def lam2(s=group2_std):
        out = 1/s**2
        return out

    group1 = NoncentralT(name1, group1_mean, lam1, nu, value=y1, observed=True)
    group2 = NoncentralT(name2, group2_mean, lam2, nu, value=y2, observed=True)
    return Model({'group1':group1,
                  'group2':group2,
                  'group1_mean':group1_mean,
                  'group2_mean':group2_mean,
                  })
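PyMC2's Model accepts a dict of nodes, as here, just as readily as a list; parent nodes such as nu, lam1, and lam2 are pulled into the graph automatically. Fitting then follows the usual PyMC2 pattern, sketched below with illustrative run lengths:

from pymc import MCMC

M = MCMC(model)  # `model` is the Model returned above
M.sample(iter=110000, burn=10000, thin=10)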

pymc

Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with PyTensor

Apache-2.0