How to use the celerite.terms.Matern32Term function in celerite

To help you get started, we’ve selected a few examples of celerite.terms.Matern32Term, based on popular ways the function is used in public projects.

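All of the snippets below follow the same basic pattern: construct a terms.Matern32Term (log_sigma sets the amplitude and log_rho the correlation length, both in log space), wrap it in a celerite.GP, call gp.compute on the observation times and uncertainties, then evaluate or optimize the likelihood. Here is a minimal, self-contained sketch of that pattern; the data are synthetic and the hyperparameter values are arbitrary placeholders, not recommendations.

import numpy as np
import celerite
from celerite import terms

# Synthetic, irregularly sampled data with white noise.
np.random.seed(42)
t = np.sort(np.random.uniform(0, 10, 200))
yerr = 0.1 * np.ones_like(t)
y = np.sin(t) + yerr * np.random.randn(len(t))

# Matern-3/2 kernel: log_sigma controls the amplitude, log_rho the length scale.
kernel = terms.Matern32Term(log_sigma=np.log(np.std(y)), log_rho=np.log(1.0))

gp = celerite.GP(kernel, mean=np.mean(y))
gp.compute(t, yerr)                              # factorize the covariance matrix
print("log-likelihood:", gp.log_likelihood(y))   # evaluate the GP likelihood of the data

# Posterior mean and variance of the process on a fine grid.
t_pred = np.linspace(0, 10, 500)
mu, var = gp.predict(y, t_pred, return_var=True)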

github iancze / PSOAP / psoap / covariance.py
import numpy as np
import celerite
from celerite import terms
# optimize_epoch_velocity_f and the exception module C are defined elsewhere in psoap.

def determine_all_velocities(chunk, log_sigma, log_rho, mu_GP=1.0):
    kernel = terms.Matern32Term(log_sigma=log_sigma, log_rho=log_rho)
    gp = celerite.GP(kernel, mean=mu_GP, fit_mean=False)

    # The first epoch serves as the fixed reference; the others are optimized against it.
    lwl_fixed = chunk.lwl[0]
    fl_fixed = chunk.fl[0]
    sigma_fixed = chunk.sigma[0]

    velocities = np.empty(chunk.n_epochs, dtype=np.float64)
    velocities[0] = 0.0

    for i in range(1, chunk.n_epochs):
        try:
            velocities[i] = optimize_epoch_velocity_f(chunk.lwl[i], chunk.fl[i], chunk.sigma[i], lwl_fixed, fl_fixed, sigma_fixed, gp)
        except C.ChunkError:
            print("Unable to optimize velocity for epoch {:}".format(chunk.date1D[i]))
            velocities[i] = 0.0

    return velocities
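For context, here is a rough, self-contained sketch of the kind of per-epoch comparison an optimizer like optimize_epoch_velocity_f presumably performs: Doppler-shift the trial epoch's log-wavelength grid, stack it with the fixed reference epoch, and evaluate the joint likelihood under the shared Matern-3/2 GP. Every name and the sign convention below are illustrative; this is not psoap's code.

import numpy as np

C_KMS = 299792.458  # speed of light in km/s

def epoch_pair_log_likelihood(v_kms, lwl_fixed, fl_fixed, sigma_fixed, lwl, fl, sigma, gp):
    # Shift the trial epoch into the reference frame of the fixed epoch.
    lwl_shifted = lwl - np.log(1.0 + v_kms / C_KMS)
    # Concatenate both epochs and sort, since celerite requires sorted input coordinates.
    lwl_all = np.concatenate([lwl_fixed, lwl_shifted])
    fl_all = np.concatenate([fl_fixed, fl])
    sigma_all = np.concatenate([sigma_fixed, sigma])
    order = np.argsort(lwl_all)
    gp.compute(lwl_all[order], yerr=sigma_all[order])
    return gp.log_likelihood(fl_all[order])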
github KeplerGO / lightkurve / lightkurve / correctors.py (the same code also appears in lightkurve / correctors / pldcorrector.py)
            # We do this by estimating the long term trend y by applying the
            # preliminary PLD model defined above and subtracting it from the raw light curve.
            # The "in transit" cadences are masked out in this step to prevent the
            # long term approximation from over-fitting the transits.
            XTX = np.dot(MX.T, MX)
            XTX[np.diag_indices_from(XTX)] += 1e-8
            XTy = np.dot(MX.T, M(rawflux))
            y = M(rawflux) - np.dot(MX, np.linalg.solve(XTX, XTy))

            # Estimate the amplitude parameter of a Matern-3/2 kernel GP
            # by computing the standard deviation of y.
            amp = np.nanstd(y)
            tau = gp_timescale  # tau is a user-defined parameter
            # set up gaussian process using celerite
            # we use a Matern-3/2 kernel for its flexibility and non-periodicity
            kernel = celerite.terms.Matern32Term(np.log(amp), np.log(tau))
            gp = celerite.GP(kernel)
            gp.compute(M(self.time), M(rawflux_err))

            # compute the coefficients C on the basis vectors;
            # the PLD design matrix will be dotted with C to solve for the noise model.
            A = np.dot(MX.T, gp.apply_inverse(MX))
            B = np.dot(MX.T, gp.apply_inverse(M(rawflux)[:, None])[:, 0])

        else:
            # compute the coefficients C on the basis vectors;
            # the PLD design matrix will be dotted with C to solve for the noise model.
            ivar = 1.0 / M(rawflux_err)**2 # inverse variance
            A = np.dot(MX.T, MX * ivar[:, None])
            B = np.dot(MX.T, M(rawflux) * ivar)

        # apply prior to design matrix weights for numerical stability
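The snippet is cut off at the "apply prior to design matrix weights" comment. As a rough sketch of what typically follows, and not lightkurve's exact code, the prior is added to the diagonal of A and the system is solved for the weights; X is the full (unmasked) PLD design matrix and prior_sigma a hypothetical prior width, both illustrative names:

import numpy as np

A_reg = A.copy()
A_reg[np.diag_indices_from(A_reg)] += 1.0 / prior_sigma**2  # Gaussian prior on the weights
w = np.linalg.solve(A_reg, B)                               # coefficients on the basis vectors
noise_model = np.dot(X, w)                                  # systematics model at every cadence
corrected_flux = rawflux - noise_model                      # hypothetical detrended light curve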
github MNGuenther / allesfitter / allesfitter / computer.py
def stellar_var_get_gp(params, key):
    
    #::: kernel
    if config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_real':
        kernel = terms.RealTerm(log_a=params['stellar_var_gp_real_lna_'+key], 
                                log_c=params['stellar_var_gp_real_lnc_'+key])
        
    elif config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_complex':
        kernel = terms.ComplexTerm(log_a=params['stellar_var_gp_complex_lna_'+key],
                                   log_b=params['stellar_var_gp_complex_lnb_'+key],
                                   log_c=params['stellar_var_gp_complex_lnc_'+key],
                                   log_d=params['stellar_var_gp_complex_lnd_'+key])
                  
    elif config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_Matern32':
        kernel = terms.Matern32Term(log_sigma=params['stellar_var_gp_matern32_lnsigma_'+key], 
                                    log_rho=params['stellar_var_gp_matern32_lnrho_'+key])
        
    elif config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_SHO':
        kernel = terms.SHOTerm(log_S0=params['stellar_var_gp_sho_lnS0_'+key], 
                               log_Q=params['stellar_var_gp_sho_lnQ_'+key],
                               log_omega0=params['stellar_var_gp_sho_lnomega0_'+key])                               
        
    else:
        raise KeyError('GP settings and params do not match.')
    
    #::: GP and mean (simple offset)  
    if 'stellar_var_gp_offset_'+key in params:
        gp = celerite.GP(kernel, mean=params['stellar_var_gp_offset_'+key], fit_mean=True)
    else:
        gp = celerite.GP(kernel, mean=0.)

    return gp
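As a hedged usage sketch rather than allesfitter's own code, a GP returned by this helper would then be conditioned on the observed time series and used to evaluate the likelihood or predict the stellar-variability baseline; x, y, yerr and the 'flux' key below are illustrative:

gp = stellar_var_get_gp(params, 'flux')
gp.compute(x, yerr=yerr)                       # condition on the observation times and errors
print(gp.log_likelihood(y))                    # the GP's contribution to the fit's log-likelihood
baseline = gp.predict(y, x, return_cov=False)  # posterior mean = stellar variability model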
github nespinoza / juliet / juliet / fit.py
            self.use_celerite = True
        elif self.kernel_name == 'CeleriteMaternKernel':
            # Generate matern kernel:
            matern_kernel = terms.Matern32Term(log_sigma=np.log(10.), log_rho=np.log(10.))
            # Jitter term:
            kernel_jitter = terms.JitterTerm(np.log(100*1e-6))
            # Wrap GP kernel and object:
            if self.instrument in ['rv','lc']:
                self.kernel = matern_kernel
            else:
                self.kernel = matern_kernel + kernel_jitter
            # We are using celerite:
            self.use_celerite = True
        elif self.kernel_name == 'CeleriteMaternExpKernel':
            # Generate matern and exponential kernels:
            matern_kernel = terms.Matern32Term(log_sigma=np.log(10.), log_rho=np.log(10.))
            exp_kernel = terms.RealTerm(log_a=np.log(10.), log_c=np.log(10.))
            # Jitter term:
            kernel_jitter = terms.JitterTerm(np.log(100*1e-6))
            # Wrap GP kernel and object:
            if self.instrument in ['rv','lc']:
                self.kernel = exp_kernel*matern_kernel
            else:
                self.kernel = exp_kernel*matern_kernel + kernel_jitter
            # We add a phantom variable because we want to leave index 2 without a value ON PURPOSE: that entry
            # corresponds to the log(sigma) of the matern kernel in the multiplication, whose sigma we fix to 1
            # (i.e., log_sigma = 0), so it is always 0.
            phantomvariable = 1
            # We are using celerite:
            self.use_celerite = True
        elif self.kernel_name == 'CeleriteSHOKernel':
            # Generate kernel:
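The "phantom variable" comment above points at an amplitude degeneracy: in a product of terms the two amplitudes multiply, so only one of them needs to vary. A minimal sketch of that idea with illustrative values (this is not juliet's parameter handling):

import numpy as np
from celerite import terms

# Pin the Matern amplitude at sigma = 1 (log_sigma = 0); the RealTerm's log_a then
# carries the overall amplitude of the product kernel.
matern_kernel = terms.Matern32Term(log_sigma=0.0, log_rho=np.log(10.0))
exp_kernel = terms.RealTerm(log_a=np.log(10.0), log_c=np.log(10.0))
kernel = exp_kernel * matern_kernel

# The product kernel still lists the Matern log_sigma among its parameters; it is
# simply left at 0 instead of being fitted.
print(kernel.get_parameter_names())
print(kernel.get_parameter_vector())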
github MNGuenther / allesfitter / allesfitter.py
#        plt.figure()
#        plt.plot(x,y,'k.', color='grey')
#        plt.plot(xx,baseline,'r-', lw=2)
#        plt.show()
#        raw_input('press enter to continue')
#        now = timer() #Time after it finished
#        print("hybrid_spline took: ", now-then, " seconds.")
        return baseline    
    
    
    ###########################################################################
    #::: hybrid_GP (like Gillon+2012, but with a GP)
    ###########################################################################    
    elif command[0] == 'hybrid_GP':
        yerr = np.nanstd(y) #overwrite yerr; works better for removing smooth global trends
        kernel = terms.Matern32Term(log_sigma=1., log_rho=1.)
        gp = celerite.GP(kernel, mean=np.nanmean(y)) 
        gp.compute(x, yerr=yerr) #constrain on x/y/yerr
         
        def neg_log_like(gp_params, y, gp):
            gp.set_parameter_vector(gp_params)
            return -gp.log_likelihood(y)
        
        def grad_neg_log_like(gp_params, y, gp):
            gp.set_parameter_vector(gp_params)
            return -gp.grad_log_likelihood(y)[1]
        
        initial_params = gp.get_parameter_vector()
        bounds = gp.get_parameter_bounds()
        soln = minimize(neg_log_like, initial_params, jac=grad_neg_log_like,
                        method="L-BFGS-B", bounds=bounds, args=(y, gp))
        gp.set_parameter_vector(soln.x)
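The snippet is cut off right after the hyperparameters are optimized. As a hedged sketch of the usual final step, not necessarily allesfitter's exact code, the conditioned GP would then be evaluated at the observed times and returned as the smooth baseline:

        # Sketch: the posterior mean of the optimized GP serves as the baseline model.
        baseline = gp.predict(y, x, return_cov=False)
        return baseline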