print('KK2: ',s1, t1-t0, s1-s0)
gk_cat2.unload()
np.testing.assert_allclose(kk1.xi, kk2.xi)
np.testing.assert_allclose(kk1.weight, kk2.weight)
# NK, still using r_col, but not from a file.
gk_cat1 = treecorr.Catalog(ra=ra[:ngal//100], dec=dec[:ngal//100], r=r[:ngal//100],
                           g1=g1[:ngal//100], g2=g2[:ngal//100], k=k[:ngal//100],
                           ra_units='rad', dec_units='rad',
                           patch_centers=patch_centers)
gk_cat2 = treecorr.Catalog(ra=ra[:ngal//100], dec=dec[:ngal//100], r=r[:ngal//100],
                           g1=g1[:ngal//100], g2=g2[:ngal//100], k=k[:ngal//100],
                           ra_units='rad', dec_units='rad',
                           patch_centers=patch_centers, save_patch_dir='output')
nk1 = treecorr.NKCorrelation(bin_size=0.5, min_sep=1., max_sep=20.)
t0 = time.time()
s0 = hp.heap().size if hp else 0
nk1.process(gk_cat1, gk_cat1)
t1 = time.time()
s1 = hp.heap().size if hp else 0
print('NK1: ',s1, t1-t0, s1-s0)
gk_cat1.unload()
nk2 = treecorr.NKCorrelation(bin_size=0.5, min_sep=1., max_sep=20.)
t0 = time.time()
s0 = hp.heap().size if hp else 0
nk2.process(gk_cat2, gk_cat2, low_mem=True)
t1 = time.time()
s1 = hp.heap().size if hp else 0
print('NK2: ',s1, t1-t0, s1-s0)
gk_cat2.unload()
np.testing.assert_allclose(nk1.xi, nk2.xi)
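# The low_mem=True pass above trades time for memory: with save_patch_dir set,
# patches are written to disk once and then loaded and unloaded on demand during
# process(), so the peak heap growth (s1-s0) stays small while the binned
# results match the in-memory run.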
ascii_name = 'output/nk_ascii.txt'
nk.write(ascii_name, precision=16)
nk3 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
nk3.read(ascii_name)
np.testing.assert_allclose(nk3.npairs, nk.npairs)
np.testing.assert_allclose(nk3.weight, nk.weight)
np.testing.assert_allclose(nk3.meanr, nk.meanr)
np.testing.assert_allclose(nk3.meanlogr, nk.meanlogr)
np.testing.assert_allclose(nk3.xi, nk.xi)
# Adding correlations requires identical binning; mismatched objects raise.
with assert_raises(TypeError):
    nk2 += config
nk4 = treecorr.NKCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins)
with assert_raises(ValueError):
    nk2 += nk4
nk5 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins)
with assert_raises(ValueError):
    nk2 += nk5
nk6 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2)
with assert_raises(ValueError):
    nk2 += nk6
fits_name = 'output/nk_fits.fits'
nk.write(fits_name)
nk4 = treecorr.NKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
nk4.read(fits_name)
np.testing.assert_allclose(nk4.npairs, nk.npairs)
np.testing.assert_allclose(nk4.weight, nk.weight)
np.testing.assert_allclose(nk4.meanr, nk.meanr)
np.testing.assert_allclose(nk4.meanlogr, nk.meanlogr)
np.testing.assert_allclose(nk4.xi, nk.xi)
# Use a random catalog
# In this case the locations of the source catalog are fine to use as our random catalog,
# since they fill the region where the lenses are allowed to be.
rk2 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
t0 = time.time()
rk2.process(cat2p, cat2p)
t1 = time.time()
print('Time for processing RK = ',t1-t0)
nk2 = nk.copy()
nk2.calculateXi(rk2)
print('xi = ',nk2.xi)
print('varxi = ',nk2.varxi)
print('ratio = ',nk2.varxi / var_nk_xi)
np.testing.assert_allclose(nk2.weight, nk.weight, rtol=0.02*tol_factor)
np.testing.assert_allclose(nk2.xi, nk.xi, rtol=0.02*tol_factor)
np.testing.assert_allclose(nk2.varxi, var_nk_xi, rtol=0.3*tol_factor)
# Use a random catalog without patches
rk3 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30.)
t0 = time.time()
rk3.process(cat2, cat2p)
t1 = time.time()
print('Time for processing RK = ',t1-t0)
nk3 = nk.copy()
nk3.calculateXi(rk3)
print('xi = ',nk3.xi)
print('varxi = ',nk3.varxi)
print('ratio = ',nk3.varxi / var_nk_xi)
np.testing.assert_allclose(nk3.weight, nk.weight, rtol=0.02*tol_factor)
np.testing.assert_allclose(nk3.xi, nk.xi, rtol=0.02*tol_factor)
np.testing.assert_allclose(nk3.varxi, var_nk_xi, rtol=0.4*tol_factor)
# KK
# Smaller scales to capture the more local kappa correlations.
print('max abs diff = ',np.max(np.abs(gg.xip - xi_brut)))
print('max rel diff = ',np.max(np.abs(gg.xip - xi_brut)/np.abs(gg.xip)))
np.testing.assert_allclose(gg.xip, xi_brut, atol=2.e-7)
xi_brut = corr2d(x, y, gamma, np.conj(gamma), w=1./kappa_err**2, rmax=max_sep, bins=nbins)
# Check omitting max_sep
gg = treecorr.GGCorrelation(bin_size=2, nbins=nbins, bin_type='TwoD', brute=True)
gg.process(cat2)
print('max abs diff = ',np.max(np.abs(gg.xip - xi_brut)))
print('max rel diff = ',np.max(np.abs(gg.xip - xi_brut)/np.abs(gg.xip)))
np.testing.assert_allclose(gg.xip, xi_brut, atol=2.e-7)
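# corr2d is a helper defined elsewhere in the test suite.  As a rough sketch of
# what such a brute-force 2D binned correlation can look like (the signature and
# normalization here are illustrative assumptions, not the actual helper):
def corr2d_sketch(x, y, z1, z2, w=None, rmax=1., bins=16, return_counts=False):
    dx = np.subtract.outer(x, x).ravel()    # all O(N^2) pair separations
    dy = np.subtract.outer(y, y).ravel()
    ww = np.ones_like(dx) if w is None else np.multiply.outer(w, w).ravel()
    zz = np.real(np.multiply.outer(z1, z2)).ravel() * ww
    hrange = [[-rmax, rmax], [-rmax, rmax]]
    norm = np.histogram2d(dx, dy, bins=bins, range=hrange, weights=ww)[0]
    sums = np.histogram2d(dx, dy, bins=bins, range=hrange, weights=zz)[0]
    xi = sums / np.where(norm > 0, norm, 1)  # weighted mean of z1*z2 per 2D cell
    return (xi, norm) if return_counts else xi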
# Check NK
xi_brut = corr2d(x, y, np.ones_like(kappa), kappa, rmax=max_sep, bins=nbins)
# Check slightly larger bin_size gets rounded down
nk = treecorr.NKCorrelation(max_sep=max_sep, bin_size=2.05, bin_type='TwoD', brute=True)
nk.process(cat1, cat1)
print('max abs diff = ',np.max(np.abs(nk.xi - xi_brut)))
print('max rel diff = ',np.max(np.abs(nk.xi - xi_brut)/np.abs(nk.xi)))
np.testing.assert_allclose(nk.xi, xi_brut, atol=1.e-7)
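# (With bin_type='TwoD' and max_sep fixed, the number of cells comes from
# max_sep/bin_size, and bin_size is then shrunk so the grid exactly spans
# [-max_sep, max_sep]; hence 2.05 above rounds down to an effective size of 2.)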
xi_brut = corr2d(x, y, np.ones_like(kappa), kappa, w=1./kappa_err**2, rmax=max_sep, bins=nbins)
# Check very small, but non-zero min_sep
nk = treecorr.NKCorrelation(min_sep=1.e-6, max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
nk.process(cat2, cat2)
print('max abs diff = ',np.max(np.abs(nk.xi - xi_brut)))
print('max rel diff = ',np.max(np.abs(nk.xi - xi_brut)/np.abs(nk.xi)))
np.testing.assert_allclose(nk.xi, xi_brut, atol=1.e-7)
# Check NN
xi_brut, counts = corr2d(x, y, np.ones_like(kappa), np.ones_like(kappa),
                         rmax=max_sep, bins=nbins, return_counts=True)
print('varxi = ',nk.varxi)
print('ratio = ',nk.varxi / var_nk_xi)
np.testing.assert_array_less((nk.xi - mean_nk_xi)**2/var_nk_xi, 25) # within 5 sigma
np.testing.assert_allclose(nk.varxi, var_nk_xi, rtol=0.5*tol_factor)
# Check sample covariance estimate
cov_xi = nk.estimate_cov('sample')
print('NK sample variance:')
print('varxi = ',cov_xi.diagonal())
print('ratio = ',cov_xi.diagonal() / var_nk_xi)
np.testing.assert_allclose(cov_xi.diagonal(), var_nk_xi, rtol=0.6*tol_factor)
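# ('sample' uses the scatter among the individual per-patch estimates rather
# than delete-one jackknife realizations, so it is typically a noisier estimate
# when the number of patches is small; hence the looser tolerance here.)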
# Direct jackknife on the plain NK estimate: delete one patch at a time.
xi_list = []
for i in range(npatch):
    lens_cat1 = treecorr.Catalog(x=lens_cat.x[lens_cat.patch != i],
                                 y=lens_cat.y[lens_cat.patch != i])
    source_cat1 = treecorr.Catalog(x=source_cat.x[source_cat.patch != i],
                                   y=source_cat.y[source_cat.patch != i],
                                   k=source_cat.k[source_cat.patch != i])
    nk1 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
    nk1.process(lens_cat1, source_cat1)
    xi_list.append(nk1.xi)
xi_list = np.array(xi_list)
xi = np.mean(xi_list, axis=0)
print('mean xi = ',xi)
C = np.cov(xi_list.T, bias=True) * (len(xi_list)-1)
varxi = np.diagonal(C)
print('varxi = ',varxi)
# xi isn't exact, because the patch-to-patch variation in the denominators doesn't
# commute with the mean.  nk.xi is the more accurate overall estimate of the
# correlation function, and the difference shrinks as npatch increases.
np.testing.assert_allclose(nk.xi, xi, rtol=0.01 * tol_factor)
np.testing.assert_allclose(nk.varxi, varxi)
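# Tiny illustration of the non-commutation noted above (hypothetical numbers):
# averaging per-patch ratios differs from the ratio of the summed numerators
# and denominators when the denominators vary.
num = np.array([1., 2.])
den = np.array([1., 4.])
assert not np.isclose(np.mean(num/den), np.sum(num)/np.sum(den))  # 0.75 vs 0.6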
# Repeat with randoms.
rk = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
rk.process(rand_lens_cat, source_cat)
nk.calculateXi(rk)
print('With randoms:')
print('nk = ',nk.xi)
print('var = ',nk.varxi)
print('Direct jackknife:')
xi_list = []
for i in range(npatch):
    lens_cat1 = treecorr.Catalog(x=lens_cat.x[lens_cat.patch != i],
                                 y=lens_cat.y[lens_cat.patch != i])
    rand_lens_cat1 = treecorr.Catalog(x=rand_lens_cat.x[rand_lens_cat.patch != i],
                                      y=rand_lens_cat.y[rand_lens_cat.patch != i])
    source_cat1 = treecorr.Catalog(x=source_cat.x[source_cat.patch != i],
                                   y=source_cat.y[source_cat.patch != i],
                                   k=source_cat.k[source_cat.patch != i])
    nk1 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
    nk1.process(lens_cat1, source_cat1)
    rk1 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True)
    rk1.process(rand_lens_cat1, source_cat1)
    nk1.calculateXi(rk1)
    xi_list.append(nk1.xi)
xi_list = np.array(xi_list)
C = np.cov(xi_list.T, bias=True) * (len(xi_list)-1)
varxi = np.diagonal(C)
print('var = ',varxi)
np.testing.assert_allclose(nk.varxi, varxi)
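# np.cov(xi_list.T, bias=True) is (1/n) sum_i d_i d_i^T with d_i = xi_i - xi_bar,
# so the factor (len(xi_list)-1) above turns it into the standard delete-one
# jackknife covariance, C_jk = (n-1)/n * sum_i d_i d_i^T.  Explicit equivalent:
def jackknife_cov(xi_list):
    xi_list = np.asarray(xi_list)        # (npatch, nbins) delete-one estimates
    d = xi_list - xi_list.mean(axis=0)
    n = len(xi_list)
    return (n - 1) / n * d.T.dot(d)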
# Repeat for NG, GG, KK, KG
ng = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True,
                            var_method='jackknife')
ng.process(lens_cat, source_cat)
gg = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True,
                            var_method='jackknife')
gg.process(source_cat)
method = 'jackknife' if use_patches else 'shot'
cov = treecorr.estimate_multi_cov([ng,gg], method)
print('cov = ',cov)
print('sigma = ',np.sqrt(cov.diagonal()))
print('S/N = ',np.concatenate([ng.xi,gg.xip,gg.xim]) / np.sqrt(cov.diagonal()))
assert len(gg.xip) == nbins
assert len(gg.xim) == nbins
assert cov.shape == (3*nbins, 3*nbins)
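# estimate_multi_cov concatenates each correlation's data vector in the order
# given: [ng, gg] -> [ng.xi, gg.xip, gg.xim], hence the (3*nbins, 3*nbins) shape.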
# Apply sensitivities.
print('Process kk')
kk = treecorr.KKCorrelation(bin_config)
print('Process nk')
nk = treecorr.NKCorrelation(bin_config)
kk.process(sources)
nk.process(lenses, sources)
ng.xi /= nk.xi
gg.xip /= kk.xi
gg.xim /= kk.xi
# This makes the assumption that the power spectrum of the sensitivity is
# effectively uniform across the survey, so we don't bother propagating the
# covariance of the sensitivity itself.
cov /= np.outer(np.concatenate([nk.xi,kk.xi,kk.xi]),
                np.concatenate([nk.xi,kk.xi,kk.xi]))
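# This is first-order error propagation for y = x/s with s held fixed:
#   Cov(y)[i,j] = Cov(x)[i,j] / (s[i]*s[j]),
# i.e. element-wise division by np.outer(s, s) with s = [nk.xi, kk.xi, kk.xi].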
print('gg.xip => ',gg.xip)
print('gg.xim => ',gg.xim)
print('ng.xi => ',ng.xi)
# Do NK correlation function if necessary
if 'nk_file_name' in config:
    if cat2 is None:
        raise TypeError("file_name2 is required for nk correlation")
    logger.warning("Performing NK calculations...")
    nk = treecorr.NKCorrelation(config,logger)
    nk.process(cat1,cat2)
    logger.info("Done NK calculation.")
    rk = None
    if rand1 is None:
        if config.get('nk_statistic',None) == 'compensated':
            raise TypeError("rand_files is required for nk_statistic = compensated")
    elif config.get('nk_statistic','compensated') == 'compensated':
        rk = treecorr.NKCorrelation(config,logger)
        rk.process(rand1,cat2)
        logger.info("Done RK calculation.")
    nk.write(config['nk_file_name'], rk)
    logger.warning("Wrote NK correlation to %s",config['nk_file_name'])
# Do KG correlation function if necessary
if 'kg_file_name' in config:
    if cat2 is None:
        raise TypeError("file_name2 is required for kg correlation")
    logger.warning("Performing KG calculations...")
    kg = treecorr.KGCorrelation(config,logger)
    kg.process(cat1,cat2)
    logger.info("Done KG calculation.")
    kg.write(config['kg_file_name'])
    logger.warning("Wrote KG correlation to %s",config['kg_file_name'])