How to use the celerite.terms.JitterTerm function in celerite

To help you get started, we’ve selected a few celerite examples based on popular ways it is used in public projects.

Secure your code as it’s written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

github dfm / celerite / tests / test_terms.py View on GitHub external
        terms.JitterTerm(log_sigma=0.5) + terms.JitterTerm(log_sigma=0.1),
    ]
)
def test_jitter_jacobian(k, eps=1.34e-7):
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_jitter_jacobian()
        return

    v = k.get_parameter_vector()
    jac = k.get_jitter_jacobian()
    assert len(jac) == len(v)
    jac0 = np.empty_like(jac)
    for i, pval in enumerate(v):
        v[i] = pval + eps
        k.set_parameter_vector(v)
        jitter = k.jitter
github dfm / celerite / tests / test_celerite.py View on GitHub external
        terms.JitterTerm(log_sigma=0.1),
        terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
        terms.RealTerm(log_a=0.1, log_c=0.4),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
        terms.RealTerm(log_a=0.1, log_c=0.4),
    ], [False, True])
)
def test_grad_log_likelihood(kernel, with_general, seed=42, eps=1.34e-7):
    np.random.seed(seed)
    x = np.sort(np.random.rand(100))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    if with_general:
        U = np.vander(x - np.mean(x), 4).T
github nespinoza / juliet / juliet.py View on GitHub external
# Now that we know the type of GP, we extract the "instrument" corresponding to each GP 
            # parameter. For example, for the ExpSineSquaredKernel, it might happen the user wants 
            # to have a common GP_Prot parameter shared along many instruments, e.g., GP_Prot_TESS_K2_RV,
            # which means the user wants a common Prot for TESS and K2 photometry, and also for the RVs. However, 
            # the same GP might have a different Gamma, i.e., there might be a GP_Gamma_TESS, GP_Gamma_K2 and GP_Gamma_RV.
            # The idea here is to, e.g., in the case of TESS photometry, gather lc_dictionary[instrument]['GP_Prot'] = 
            # 'TESS_K2_RV', and lc_dictionary[instrument]['GP_Gamma'] = 'TESS':
            for GPvariable in ['B','C','L','Prot']:
                for pnames in priors.keys():
                    vec = pnames.split('_')
                    if (vec[0] == 'GP') and (GPvariable in vec[1]) and (instrument in vec):
                        lc_dictionary[instrument]['GP_'+GPvariable] = '_'.join(vec[2:])
         
            # Jitter term:
            kernel_jitter = terms.JitterTerm(np.log(100*1e-6))

            # Wrap GP object to compute likelihood:
            kernel = rot_kernel + kernel_jitter
            lc_dictionary[instrument]['GPObject'] = celerite.GP(kernel, mean=0.0)
            # Note order of GP Vector: logB, logL, logProt, logC, logJitter
            lc_dictionary[instrument]['GPVector'] = np.zeros(5)
            lc_dictionary[instrument]['GPObject'].compute(lc_dictionary[instrument]['X'],yerr=ferr_lc[instrument_indexes_lc[instrument]])          

        if lc_dictionary[instrument]['GPType'] == 'ExpSineSquaredSEKernel':
            for GPvariable in ['sigma','alpha','Gamma','Prot']:
                for pnames in priors.keys():
                    vec = pnames.split('_')
                    if (vec[0] == 'GP') and (GPvariable in vec[1]) and (instrument in vec):
                        lc_dictionary[instrument]['GP_'+GPvariable] = '_'.join(vec[2:])

            # Generate GP Base Kernel (Constant * ExpSquared * ExpSine2):
github dfm / celerite / paper / figures / error / error.py View on GitHub external
t = np.sort(np.random.uniform(0, N.max() * 0.8, N.max()))
    yerr = np.random.uniform(1.0, 1.5, len(t))

    for ix, j in enumerate(J):
        kernel = terms.RealTerm(np.random.uniform(-1, 1),
                                np.random.uniform(-5, -1))
        kernel += terms.RealTerm(np.random.uniform(-1, 1),
                                 np.random.uniform(-5, -1))
        while (len(kernel.coefficients[0]) + 2*len(kernel.coefficients[2]) <
               2*j):
            kernel += terms.SHOTerm(
                log_S0=np.random.uniform(-1, 1),
                log_omega0=np.random.uniform(-5, 0),
                log_Q=np.random.uniform(0, 1),
            )
        kernel += terms.JitterTerm(np.random.uniform(-1, 1))
        assert (
            len(kernel.coefficients[0]) + 2*len(kernel.coefficients[2]) == 2*j
        )

        gp = celerite.GP(kernel)

        for iy, n in enumerate(N):
            gp.compute(t[:n], yerr[:n])
            alpha_true = np.random.randn(n)
            args = [kernel.jitter] + list(kernel.coefficients)
            args += [t[:n], alpha_true]
            y = gp.solver.dot(*args)[:, 0] + yerr[:n]**2 * alpha_true

            alpha = gp.apply_inverse(y[:n])[:, 0]
            logdet = gp.solver.log_determinant()
            alpha_error[k, ix, iy] = np.max(np.abs(alpha-alpha_true))
github dfm / celerite / celerite / celerite.py View on GitHub external
def __init__(self,
                 kernel,
                 mean=0.0, fit_mean=False,
                 log_white_noise=None, fit_white_noise=False):
        self._solver = None
        self._computed = False
        self._t = None
        self._y_var = None

        # Backwards compatibility for 'log_white_noise' parameter
        if log_white_noise is not None:
            warnings.warn("The 'log_white_noise' parameter is deprecated. "
                          "Use a 'JitterTerm' instead.")
            k = terms.JitterTerm(log_sigma=float(log_white_noise))
            if not fit_white_noise:
                k.freeze_parameter("log_sigma")
            kernel += k

        # Build up a list of models for the ModelSet
        models = [("kernel", kernel)]

        # And the mean model
        try:
            float(mean)
        except TypeError:
            pass
        else:
            mean = ConstantModel(float(mean))

        if not fit_mean:
github California-Planet-Search / radvel / radvel / likelihood.py View on GitHub external
xpred (np.array): numpy array of x values for realizing the GP
            Returns:
                tuple: tuple containing:
                    np.array: numpy array of predictive means \n
                    np.array: numpy array of predictive standard deviations
        """

        self.update_kernel_params()

        B = self.kernel.hparams['gp_B'].value
        C = self.kernel.hparams['gp_C'].value
        L = self.kernel.hparams['gp_L'].value
        Prot = self.kernel.hparams['gp_Prot'].value

        # build celerite kernel with current values of hparams
        kernel = celerite.terms.JitterTerm(
                log_sigma = np.log(self.params[self.jit_param].value)
                )

        kernel += celerite.terms.RealTerm(
            log_a=np.log(B*(1+C)/(2+C)),
            log_c=np.log(1/L)
        )

        kernel += celerite.terms.ComplexTerm(
            log_a=np.log(B/(2+C)),
            log_b=-np.inf,
            log_c=np.log(1/L),
            log_d=np.log(2*np.pi/Prot)
        )

        gp = celerite.GP(kernel)
github nespinoza / juliet / juliet / fit.py View on GitHub external
self.kernel = 1.*george.kernels.ExpSquaredKernel(np.ones(self.nX),ndim = self.nX, axes = range(self.nX))
            # (Note no jitter kernel is given, as with george one defines this in the george.GP call):
        elif self.kernel_name == 'ExpSineSquaredSEKernel':
            # Generate the kernels:
            K1 = 1.*george.kernels.ExpSquaredKernel(metric = 1.0)
            K2 = george.kernels.ExpSine2Kernel(gamma=1.0,log_period=1.0)
            self.kernel = K1*K2
            # (Note no jitter kernel is given, as with george one defines this in the george.GP call):
        elif self.kernel_name == 'CeleriteQPKernel':
            # Generate rotational kernel:
            rot_kernel = terms.TermSum(RotationTerm(log_amp=np.log(10.),\
                                                    log_timescale=np.log(10.0),\
                                                    log_period=np.log(3.0),\
                                                    log_factor=np.log(1.0)))
            # Jitter term:
            kernel_jitter = terms.JitterTerm(np.log(100*1e-6))
            # Wrap GP kernel and object:
            if self.instrument in ['rv','lc']:
                self.kernel = rot_kernel 
            else:
                self.kernel = rot_kernel + kernel_jitter
            # We are using celerite:
            self.use_celerite = True
        elif self.kernel_name == 'CeleriteExpKernel':
            # Generate exponential kernel:
            exp_kernel = terms.RealTerm(log_a=np.log(10.), log_c=np.log(10.))
            # Jitter term:
            kernel_jitter = terms.JitterTerm(np.log(100*1e-6))
            # Wrap GP kernel and object:
            if self.instrument in ['rv','lc']:
                self.kernel = exp_kernel
            else: