Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_nearest_base(self):
    """Resample the test swath onto the test grid with nearest-neighbour
    lookup and check a known output sample.

    Uses a 100 km radius of influence, no data reduction, and a single
    segment; the first resampled value is expected to be 2.
    """
    # Idiom fix: backslash continuations are redundant (and fragile)
    # inside the call's parentheses — implicit continuation suffices.
    res = kd_tree.resample_nearest(self.tswath,
                                   self.tdata.ravel(), self.tgrid,
                                   100000, reduce_data=False, segments=1)
    self.assertTrue(res[0] == 2, 'Failed to calculate nearest neighbour')
def unpatch_kd_tree():
    """Undo the patching of the kd_tree module.

    Puts the saved originals back in place and deletes the backup
    attributes created by the patching step.
    """
    restore_pairs = (
        ("get_neighbour_info", "old_get_neighbour_info"),
        ("get_sample_from_neighbour_info", "old_gsfni"),
    )
    for public_name, backup_name in restore_pairs:
        setattr(kd_tree, public_name, getattr(kd_tree, backup_name))
        delattr(kd_tree, backup_name)
def unpatch_kd_tree():
    """Unpatching the kd_tree module.

    Restores ``kd_tree.get_neighbour_info`` and
    ``kd_tree.get_sample_from_neighbour_info`` from the saved backup
    attributes, then removes those backups.

    NOTE(review): this is a byte-for-byte duplicate of an identical
    ``unpatch_kd_tree`` definition earlier in the file; the later
    definition silently shadows the earlier one. One copy should be
    removed.
    """
    kd_tree.get_neighbour_info = kd_tree.old_get_neighbour_info
    delattr(kd_tree, "old_get_neighbour_info")
    kd_tree.get_sample_from_neighbour_info = kd_tree.old_gsfni
    delattr(kd_tree, "old_gsfni")
# NOTE(review): fragment of a larger (Python 2) method — the enclosing
# ``def`` and the definitions of ``interp``, ``r``, ``n``, ``weight_func``,
# ``cmaqvar`` and ``df`` are outside this view.  It interpolates a CMAQ
# model variable onto observation-site locations, one timestamp at a time,
# using pyresample kd_tree resampling.
dates = self.cmaq.dates[self.cmaq.indexdates]
lat = self.cmaq.latitude
lon = self.cmaq.longitude
# Source grid: the model's 2-D lat/lon mesh.
grid1 = geometry.GridDefinition(lons=lon, lats=lat)
# Accumulators for interpolated values, timestamps and site IDs; grown
# per-iteration with numpy append (O(n^2) overall, but n is small here).
vals = array([], dtype=cmaqvar.dtype)
date = array([], dtype='O')
site = array([], dtype=df.SCS.dtype)
print ' Interpolating using ' + interp + ' method'  # Python 2 print statement
for i, j in enumerate(dates):
    # Boolean mask selecting the observation rows for this timestamp.
    con = df.datetime == j
    lats = df[con].Latitude.values
    lons = df[con].Longitude.values
    # Target "grid": the scattered site locations as column vectors.
    grid2 = geometry.GridDefinition(lons=vstack(lons), lats=vstack(lats))
    if interp.lower() == 'nearest':
        val = kd_tree.resample_nearest(grid1, cmaqvar[i, :, :].squeeze(), grid2, radius_of_influence=r,
                                       fill_value=NaN, nprocs=2).squeeze()
    elif interp.lower() == 'idw':
        # Inverse-distance weighting via a caller-supplied weight function.
        val = kd_tree.resample_custom(grid1, cmaqvar[i, :, :].squeeze(), grid2, radius_of_influence=r,
                                      fill_value=NaN, neighbours=n, weight_funcs=weight_func,
                                      nprocs=2).squeeze()
    elif interp.lower() == 'gauss':
        val = kd_tree.resample_gauss(grid1, cmaqvar[i, :, :].squeeze(), grid2, radius_of_influence=r,
                                     sigmas=r / 2., fill_value=NaN, neighbours=n, nprocs=2).squeeze()
    # NOTE(review): if ``interp`` matches none of the branches above,
    # ``val`` is undefined (first iteration) or stale (later iterations) —
    # presumably callers only pass the three supported values; verify.
    vals = append(vals, val)
    # Repeat the timestamp once per site so vals/date/site stay aligned.
    dd = empty(lons.shape[0], dtype=date.dtype)
    dd[:] = j
    date = append(date, dd)
    site = append(site, df[con].SCS.values)
vals = pd.Series(vals)
date = pd.Series(date)
# NOTE(review): tail of a larger function — ``nc``, ``lon_0``, ``data`` and
# ``new_grid`` are defined outside this view.  Builds the source area
# definition for a GOES geostationary image and resamples ``data`` onto
# ``new_grid`` with nearest-neighbour lookup.
ht_0 = nc.variables['nominal_satellite_height'][0] * 1000  # meters
# Multiplying x/y by the satellite height converts them to geostationary
# projection coordinates in meters — assumes x/y are scan angles in
# radians; TODO confirm against the file's units attribute.
x = nc.variables['x'][:] * ht_0  #/ 1000.0
y = nc.variables['y'][:] * ht_0  #/ 1000.0
nx = len(x)
ny = len(y)
max_x = x.max(); min_x = x.min(); max_y = y.max(); min_y = y.min()
# Pad by half a pixel in each direction so the extent spans cell edges
# rather than cell centers.
half_x = (max_x - min_x) / nx / 2.
half_y = (max_y - min_y) / ny / 2.
extents = (min_x - half_x, min_y - half_y, max_x + half_x, max_y + half_y)
# 'geos' projection with hard-coded semi-major/semi-minor axes; the
# satellite height and sub-satellite longitude come from the data file.
old_grid = pr.geometry.AreaDefinition('geos','goes_conus','geos',
    {'proj':'geos', 'h':str(ht_0), 'lon_0':str(lon_0) ,'a':'6378169.0', 'b':'6356584.0'},
    nx, ny, extents)
# now do remapping
logging.info('Remapping from {}'.format(old_grid))
# 50 km radius of influence for the nearest-neighbour search.
return pr.kd_tree.resample_nearest(old_grid, data, new_grid, radius_of_influence=50000)
# NOTE(review): fragment of a resampling method — the enclosing signature
# (``src_data``, ``interp_method``, ``fill_value``, ``radius``, ``nprocs``,
# ``print_msg``) and the remaining arguments of the bilinear call are
# outside this view.
if print_msg:
    print('input source data is not float, change fill_value from NaN to 0.')
# reduction of swath data: keep only the valid samples, if a mask was built.
if self.valid_index is not None:
    src_data = src_data[self.valid_index]
# get number of segments
num_segment = self.get_segment_number()
if interp_method.startswith('near'):
    if print_msg:
        msg = 'nearest resampling with kd_tree '
        msg += 'using {} processor cores in {} segments ...'.format(nprocs, num_segment)
        print(msg)
    # Nearest-neighbour resampling from the source to the destination
    # geometry definitions prepared elsewhere on this object.
    dest_data = pr.kd_tree.resample_nearest(self.src_def,
                                            src_data,
                                            self.dest_def,
                                            nprocs=nprocs,
                                            fill_value=fill_value,
                                            radius_of_influence=radius,
                                            segments=num_segment,
                                            epsilon=0.5)
elif interp_method.endswith('linear'):
    if print_msg:
        print('bilinear resampling using {} processor cores ...'.format(nprocs))
    # Bilinear resampling; call continues past the visible lines.
    dest_data = pr.bilinear.resample_bilinear(src_data,
                                              self.src_def,
                                              self.dest_def,
                                              nprocs=nprocs,
                                              fill_value=fill_value,