def open_store_variable(self, name, var):
    data = indexing.LazilyOuterIndexedArray(PncArrayWrapper(name, self))
    attrs = OrderedDict((k, getattr(var, k)) for k in var.ncattrs())
    return Variable(var.dimensions, data, attrs)
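
# --- Hedged sketch (not from the source): the Variable(dims, data, attrs)
# pattern used by open_store_variable above, with illustrative names only.
import numpy as np
import xarray as xr

demo_data = np.arange(12.0).reshape(3, 4)
demo_var = xr.Variable(
    ("time", "x"),                        # dimension names
    demo_data,                            # the underlying array
    {"units": "K", "long_name": "demo"},  # attribute dict
)
print(demo_var.dims, demo_var.shape, demo_var.attrs["units"])
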
"x_grid_index_at_f_location": irange,
"y_grid_index": jrange,
"y_grid_index_at_v_location": jrange,
"y_grid_index_at_f_location": jrange,
"z_grid_index": krange,
"z_grid_index_at_lower_w_location": krange,
"z_grid_index_at_upper_w_location": krange,
"z_grid_index_at_w_location": krange_p1,
}
for dim in self._dimensions:
    dim_meta = dimensions[dim]
    dims = dim_meta['dims']
    attrs = dim_meta['attrs']
    data = dimension_data[attrs['standard_name']]
    dim_variable = xr.Variable(dims, data, attrs)
    self._variables[dim] = dim_variable
# possibly add the llc dimension
# seems sloppy to hard code this here
# TODO: move this metadata to variables.py
if self.llc:
    self._dimensions.append(LLC_FACE_DIMNAME)
    data = np.arange(self.nface)
    attrs = {'standard_name': 'face_index'}
    dims = [LLC_FACE_DIMNAME]
    self._variables[LLC_FACE_DIMNAME] = xr.Variable(dims, data, attrs)
# do the same for layers
for layer_name, n_layer in self.layers.items():
    for suffix, offset in zip(['bounds', 'center', 'interface'],
                              [0, -1, -2]):
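
# --- Hedged sketch (not from the source): the dimension loop earlier in this
# snippet materializes coordinate variables from a metadata mapping; the dicts
# below are hypothetical stand-ins for `dimensions` and `dimension_data`.
import numpy as np
import xarray as xr

demo_dimensions = {
    "i": {"dims": ["i"], "attrs": {"standard_name": "x_grid_index"}},
    "k": {"dims": ["k"], "attrs": {"standard_name": "z_grid_index"}},
}
demo_dimension_data = {
    "x_grid_index": np.arange(90),
    "z_grid_index": np.arange(50),
}
demo_variables = {}
for demo_dim, meta in demo_dimensions.items():
    # look up the index array via the standard_name attribute, as above
    demo_variables[demo_dim] = xr.Variable(
        meta["dims"], demo_dimension_data[meta["attrs"]["standard_name"]],
        meta["attrs"],
    )
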
varo.attrs['state'] = state or 'all'
# calculate the source for the bars
cutoff = self.task_config.cutoff
if state and t:
    g = df.groupby(pd.cut(df[v + '_' + state], temp_bins))
    df_v = v + t + '_' + state  #: The variable name in df
    counts = g[df_v].count().values
    ds[vname + '_counts'] = xr.Variable(
        ('temp_bins', ), counts, attrs=varo.attrs.copy())
    means = g[df_v].mean().values
    means[counts <= cutoff] = np.nan
    ds[vname + '_mean'] = xr.Variable(
        ('temp_bins', ), means, attrs=varo.attrs.copy())
    std = g[df_v].std().values
    std[counts <= cutoff] = np.nan
    ds[vname + '_sd'] = xr.Variable(
        ('temp_bins', ), std, attrs=varo.attrs.copy())
# means
df = self.data[1]
for v, t, state in product(['tmin', 'tmax'], ['stddev', ''],
                           ['', 'wet', 'dry']):
    vname = v + t + (('_' + state) if state else '')
    ds_vname = vname + '_means'
    varo = ds[ds_vname] = xr.Variable(
        ('index_mean', ), np.asarray(df[vname]),
        attrs=ds[vname].attrs.copy())
return ds
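
# --- Hedged sketch (not from the source): the bin-and-mask pattern above,
# condensed with synthetic data. pd.cut bins a column, groupby computes
# per-bin statistics, and bins with too few samples are masked to NaN.
import numpy as np
import pandas as pd
import xarray as xr

demo_df = pd.DataFrame({"tmin": np.random.default_rng(0).normal(10, 5, 500)})
demo_bins = np.arange(-10, 31, 5)
demo_cutoff = 10  # stand-in for self.task_config.cutoff

demo_g = demo_df.groupby(pd.cut(demo_df["tmin"], demo_bins))
demo_counts = demo_g["tmin"].count().values
demo_means = demo_g["tmin"].mean().values
demo_means[demo_counts <= demo_cutoff] = np.nan  # mask sparse bins

demo_ds = xr.Dataset()
demo_ds["tmin_counts"] = xr.Variable(("temp_bins",), demo_counts)
demo_ds["tmin_mean"] = xr.Variable(("temp_bins",), demo_means)
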
# get the computed PMDI data as an array of float32 values
array = _global_shared_arrays[_KEY_RESULT_PMDI][_KEY_ARRAY]
shape = _global_shared_arrays[_KEY_RESULT_PMDI][_KEY_SHAPE]
pmdi = np.frombuffer(array.get_obj()).reshape(shape).astype(np.float32)
# get the computed Z-Index data as an array of float32 values
array = _global_shared_arrays[_KEY_RESULT_ZINDEX][_KEY_ARRAY]
shape = _global_shared_arrays[_KEY_RESULT_ZINDEX][_KEY_SHAPE]
zindex = np.frombuffer(array.get_obj()).reshape(shape).astype(np.float32)
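
# --- Hedged sketch (not from the source): the frombuffer/reshape/astype chain
# above, assuming each _global_shared_arrays entry holds a multiprocessing.Array
# of C doubles plus its logical shape.
import multiprocessing

import numpy as np

demo_shape = (4, 12)
demo_shared = multiprocessing.Array("d", int(np.prod(demo_shape)))
# wrap the raw shared buffer without copying, then cast for output
demo_values = (
    np.frombuffer(demo_shared.get_obj()).reshape(demo_shape).astype(np.float32)
)
print(demo_values.dtype, demo_values.shape)
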
# create a new variable to contain the SCPDSI values, assign into the dataset
long_name = "Self-calibrated Palmer Drought Severity Index"
scpdsi_attrs = {"long_name": long_name, "valid_min": -10.0, "valid_max": 10.0}
var_name_scpdsi = "scpdsi"
scpdsi_var = xr.Variable(dims=output_dims, data=scpdsi, attrs=scpdsi_attrs)
dataset[var_name_scpdsi] = scpdsi_var
# remove all data variables except for the new SCPDSI variable
for var_name in dataset.data_vars:
    if var_name != var_name_scpdsi:
        dataset = dataset.drop(var_name)
# TODO set global attributes accordingly for this new dataset
# write the dataset as NetCDF
netcdf_file_name = (
    keyword_arguments["output_file_base"] + "_" + var_name_scpdsi + ".nc"
)
dataset.to_netcdf(netcdf_file_name)
# create a new variable to contain the PMDI values, assign into the dataset
# (the PMDI creation lines were missing from this excerpt; reconstructed here
# to mirror the SCPDSI and Z-Index blocks)
long_name = "Palmer Modified Drought Index"
pmdi_attrs = {"long_name": long_name, "valid_min": -10.0, "valid_max": 10.0}
var_name_pmdi = "pmdi"
pmdi_var = xr.Variable(dims=output_dims, data=pmdi, attrs=pmdi_attrs)
dataset[var_name_pmdi] = pmdi_var
# remove all data variables except for the new PMDI variable
for var_name in dataset.data_vars:
    if var_name != var_name_pmdi:
        dataset = dataset.drop(var_name)
# TODO set global attributes accordingly for this new dataset
# write the dataset as NetCDF
netcdf_file_name = (
    keyword_arguments["output_file_base"] + "_" + var_name_pmdi + ".nc"
)
dataset.to_netcdf(netcdf_file_name)
# create a new variable to contain the Z-Index values, assign into the dataset
long_name = "Palmer Z-Index"
zindex_attrs = {"long_name": long_name, "valid_min": -10.0, "valid_max": 10.0}
var_name_zindex = "zindex"
zindex_var = xr.Variable(dims=output_dims, data=zindex, attrs=zindex_attrs)
dataset[var_name_zindex] = zindex_var
# remove all data variables except for the new Z-Index variable
for var_name in dataset.data_vars:
    if var_name != var_name_zindex:
        dataset = dataset.drop(var_name)
# TODO set global attributes accordingly for this new dataset
# write the dataset as NetCDF
netcdf_file_name = (
    keyword_arguments["output_file_base"] + "_" + var_name_zindex + ".nc"
)
dataset.to_netcdf(netcdf_file_name)
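
# --- Hedged note (not from the source): Dataset.drop is deprecated in recent
# xarray releases in favor of drop_vars. A compact equivalent of the
# prune-then-write blocks above, with hypothetical names (writing requires a
# netCDF backend such as netCDF4 or scipy):
import numpy as np
import xarray as xr

demo_ds = xr.Dataset({
    "scpdsi": ("time", np.zeros(3)),
    "precip": ("time", np.ones(3)),
})
demo_keep = "scpdsi"
demo_ds = demo_ds.drop_vars(
    [name for name in demo_ds.data_vars if name != demo_keep]
)
demo_ds.to_netcdf("demo_" + demo_keep + ".nc")
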
# Try to add a time dimension
# TODO: Time units?
if (len(var_data) > 1) and 'time' not in self._variables:
    time_bnds = np.asarray([v.time for v in var_data])
    times = time_bnds[:, 0]
    self._variables['time'] = xr.Variable(
        ['time', ], times,
        {'bounds': 'time_bnds', 'units': cf.CTM_TIME_UNIT_STR}
    )
    self._variables['time_bnds'] = xr.Variable(
        ['time', 'nv'], time_bnds,
        {'units': cf.CTM_TIME_UNIT_STR}
    )
    self._variables['nv'] = xr.Variable(['nv', ], [0, 1])
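
# --- Hedged sketch (not from the source): the CF-style pairing above of a
# `time` coordinate with a `time_bnds` interval variable via the `bounds`
# attribute; the units string is a stand-in for cf.CTM_TIME_UNIT_STR.
import numpy as np
import xarray as xr

demo_bnds = np.array([[0.0, 1.0], [1.0, 2.0], [2.0, 3.0]])
demo_units = "hours since 2000-01-01 00:00:00"
demo_vars = {
    "time": xr.Variable(
        ("time",), demo_bnds[:, 0],  # interval start as the coordinate value
        {"bounds": "time_bnds", "units": demo_units},
    ),
    "time_bnds": xr.Variable(("time", "nv"), demo_bnds, {"units": demo_units}),
    "nv": xr.Variable(("nv",), [0, 1]),
}
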
def _get_nonspatial_coords(src_data_array):
    coords = {}
    for coord in set(src_data_array.coords) - {
        src_data_array.rio.x_dim,
        src_data_array.rio.y_dim,
        DEFAULT_GRID_MAP,
    }:
        if src_data_array[coord].dims:
            coords[coord] = xarray.IndexVariable(
                src_data_array[coord].dims,
                src_data_array[coord].values,
                src_data_array[coord].attrs,
            )
        else:
            coords[coord] = xarray.Variable(
                src_data_array[coord].dims,
                src_data_array[coord].values,
                src_data_array[coord].attrs,
            )
    return coords
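
# --- Hedged sketch (not from the source): the IndexVariable/Variable split
# used above. xarray.IndexVariable must be one-dimensional, so dimensioned
# coordinates become IndexVariable while scalar coordinates stay Variable.
import numpy as np
import xarray

demo_band = xarray.IndexVariable(("band",), np.array([1, 2, 3]), {})
demo_crs = xarray.Variable((), "EPSG:4326")
print(type(demo_band).__name__, type(demo_crs).__name__)
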
raise ValueError(
    "Unsupported index: '{index}'".format(index=keyword_arguments["index"])
)
# get the name and attributes to use for the index variable in the output NetCDF
output_var_name, output_var_attributes = _get_variable_attributes(
    keyword_arguments
)
# get the shared memory results array and convert it to a numpy array
array = _global_shared_arrays[_KEY_RESULT][_KEY_ARRAY]
shape = _global_shared_arrays[_KEY_RESULT][_KEY_SHAPE]
index_values = np.frombuffer(array.get_obj()).reshape(shape).astype(np.float32)
# create a new variable to contain the index values, assign into the dataset
variable = xr.Variable(
    dims=output_dims, data=index_values, attrs=output_var_attributes
)
dataset[output_var_name] = variable
# TODO set global attributes accordingly for this new dataset
# remove all data variables except for the new variable
for var_name in dataset.data_vars:
    if var_name != output_var_name:
        dataset = dataset.drop(var_name)
# write the dataset as NetCDF
netcdf_file_name = (
    keyword_arguments["output_file_base"] + "_" + output_var_name + ".nc"
)
dataset.to_netcdf(netcdf_file_name)
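
# --- Hedged sketch (not from the source): _get_variable_attributes is not
# shown in this excerpt; this entirely hypothetical stand-in only mirrors how
# the snippet consumes its (name, attributes) return value.
def _get_variable_attributes(keyword_arguments):
    index = keyword_arguments["index"]
    known = {
        "spi": (
            "spi",
            {"long_name": "Standardized Precipitation Index",
             "valid_min": -3.09, "valid_max": 3.09},
        ),
    }
    if index not in known:
        raise ValueError("Unsupported index: '{index}'".format(index=index))
    return known[index]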