def _coarsen_reshape(self, windows, boundary, side):
    """
    Construct a reshaped-array for coarsen
    """
    if not utils.is_dict_like(boundary):
        boundary = {d: boundary for d in windows.keys()}

    if not utils.is_dict_like(side):
        side = {d: side for d in windows.keys()}

    # remove unrelated dimensions
    boundary = {k: v for k, v in boundary.items() if k in windows}
    side = {k: v for k, v in side.items() if k in windows}

    for d, window in windows.items():
        if window <= 0:
            raise ValueError(f"window must be > 0. Given {window}")

    variable = self
    for d, window in windows.items():
        # trim or pad the object
        size = variable.shape[self._get_axis_num(d)]
        n = int(size / window)
        if boundary[d] == "exact":
            if n * window != size:
                raise ValueError(
                    f"Could not coarsen a dimension of size {size} with "
                    f"window {window}"
                )
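For context, here is a minimal sketch of the public coarsen API that this helper backs; the dimension name and window sizes are illustrative.

# Illustrative use of DataArray.coarsen; "time" and the windows are made up.
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(10.0), dims="time")

# boundary="exact" requires the dimension size to divide evenly by the window
print(da.coarsen(time=5, boundary="exact").mean().values)  # [2. 7.]

# boundary="trim" drops the leftover elements instead of raising
print(da.coarsen(time=3, boundary="trim").mean().values)   # [1. 4. 7.]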
def is_alignable(obj):
    return isinstance(obj, (DataArray, Dataset))

positions = []
keys = []
out = []
targets = []
no_key = object()
not_replaced = object()
for position, variables in enumerate(objects):
    if is_alignable(variables):
        positions.append(position)
        keys.append(no_key)
        targets.append(variables)
        out.append(not_replaced)
    elif is_dict_like(variables):
        current_out = {}
        for k, v in variables.items():
            if is_alignable(v) and k not in indexes:
                # Skip variables in indexes for alignment, because these
                # should be overwritten instead:
                # https://github.com/pydata/xarray/issues/725
                # https://github.com/pydata/xarray/issues/3377
                # TODO(shoyer): doing this here feels super-hacky -- can we
                # move it explicitly into merge instead?
                positions.append(position)
                keys.append(k)
                targets.append(v)
                current_out[k] = not_replaced
            else:
                current_out[k] = v
        out.append(current_out)
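This fragment is the collection pass of xarray's deep alignment machinery; objects and indexes come from the enclosing function. A hedged sketch of the public alignment behavior it feeds into, with invented coordinate values:

import xarray as xr

a = xr.DataArray([1, 2, 3], coords={"x": [0, 1, 2]}, dims="x")
b = xr.DataArray([10, 20, 30], coords={"x": [1, 2, 3]}, dims="x")

# join="inner" keeps only the shared "x" labels
a2, b2 = xr.align(a, b, join="inner")
print(a2.x.values)  # [1 2]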
def _infer_coords_and_dims(
    shape, coords, dims
) -> "Tuple[Dict[Any, Variable], Tuple[Hashable, ...]]":
    """All the logic for creating a new DataArray"""

    if (
        coords is not None
        and not utils.is_dict_like(coords)
        and len(coords) != len(shape)
    ):
        raise ValueError(
            "coords is not dict-like, but it has %s items, "
            "which does not match the %s dimensions of the "
            "data" % (len(coords), len(shape))
        )

    if isinstance(dims, str):
        dims = (dims,)

    if dims is None:
        dims = ["dim_%s" % n for n in range(len(shape))]
        if coords is not None and len(coords) == len(shape):
            # try to infer dimensions from coords
            if utils.is_dict_like(coords):
                raise ValueError(
                    "inferring DataArray dimensions from "
                    "dictionary like ``coords`` is no longer "
                    "supported. Use an explicit list of "
                    "``dims`` instead."
                )
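The dict-like check above is why dims must be spelled out when coords is a mapping; a small illustration with arbitrary names:

import numpy as np
import xarray as xr

data = np.zeros((2, 3))

# Sequence of (name, values) pairs: dims can be inferred positionally
da = xr.DataArray(data, coords=[("x", [0, 1]), ("y", [0, 1, 2])])

# Dict-like coords: dims must be listed explicitly
da2 = xr.DataArray(data, coords={"x": [0, 1], "y": [0, 1, 2]}, dims=["x", "y"])
print(da.dims, da2.dims)  # ('x', 'y') ('x', 'y')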
def __setitem__(self, key, value):
    """Add an array to this dataset.

    If value is a `DataArray`, call its `select_vars()` method, rename it
    to `key` and merge the contents of the resulting dataset into this
    dataset.

    If value is a `Variable` object (or tuple of form
    ``(dims, data[, attrs])``), add it to this dataset as a new
    variable.
    """
    if utils.is_dict_like(key):
        raise NotImplementedError('cannot yet use a dictionary as a key '
                                  'to set Dataset values')

    self.update({key: value})
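A short usage sketch of the tuple and DataArray forms the docstring describes; variable and dimension names are invented:

import numpy as np
import xarray as xr

ds = xr.Dataset()
ds["temperature"] = (("x", "y"), np.zeros((2, 3)), {"units": "K"})
ds["pressure"] = xr.DataArray(np.ones((2, 3)), dims=("x", "y"))
# ds[{"x": 0}] = 1.0  # dict keys are rejected, per the guard above
print(list(ds.data_vars))  # ['temperature', 'pressure']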
def _fix_attributes(attributes):
    attributes = dict(attributes)
    for k in list(attributes):
        if k.lower() == "global" or k.lower().endswith("_global"):
            # move global attributes to the top level, like the netcdf-C
            # DAP client
            attributes.update(attributes.pop(k))
        elif is_dict_like(attributes[k]):
            # flatten hierarchical attributes to a single level with
            # dot-separated keys
            attributes.update(
                {
                    "{}.{}".format(k, k_child): v_child
                    for k_child, v_child in attributes.pop(k).items()
                }
            )
    return attributes
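A standalone demo of the flattening rules, assuming _fix_attributes from above is in scope; a plain Mapping check stands in for xarray's is_dict_like, and the attribute names are invented:

from collections.abc import Mapping

def is_dict_like(value):  # stand-in for xarray.core.utils.is_dict_like
    return isinstance(value, Mapping)

attrs = {
    "NC_GLOBAL": {"title": "demo dataset"},
    "actual_range": [0, 1],
    "flags": {"valid_min": 0, "valid_max": 1},
}
print(_fix_attributes(attrs))
# {'actual_range': [0, 1], 'title': 'demo dataset',
#  'flags.valid_min': 0, 'flags.valid_max': 1}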
def _item_key_to_tuple(self, key):
    if utils.is_dict_like(key):
        return tuple(key.get(dim, slice(None)) for dim in self.dims)
    else:
        return key
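This expansion is what lets positional indexing accept dimension names; a quick illustration with made-up values:

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"))

# {"y": 1} expands to (slice(None), 1) against dims ("x", "y")
print(da[{"y": 1}].values)  # [1 4]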
def _check_data_shape(data, coords, dims):
    if data is dtypes.NA:
        data = np.nan
    if coords is not None and utils.is_scalar(data, include_0d=False):
        if utils.is_dict_like(coords):
            if dims is None:
                return data
            else:
                data_shape = tuple(
                    as_variable(coords[k], k).size if k in coords.keys() else 1
                    for k in dims
                )
        else:
            data_shape = tuple(as_variable(coord, "foo").size for coord in coords)
        data = np.full(data_shape, data)
    return data
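The scalar-broadcast path above is what makes constructor calls like the following work; the coordinate values are invented:

import xarray as xr

# scalar data is expanded to the shape implied by coords and dims
da = xr.DataArray(0.0, coords={"x": [10, 20, 30]}, dims="x")
print(da.shape)  # (3,)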
def _calculate_binary_op(self, f, other, join='inner', inplace=False):

    def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):
        if inplace and set(lhs_data_vars) != set(rhs_data_vars):
            raise ValueError('datasets must have the same data variables '
                             'for in-place arithmetic operations: %s, %s'
                             % (list(lhs_data_vars), list(rhs_data_vars)))

        dest_vars = OrderedDict()

        for k in lhs_data_vars:
            if k in rhs_data_vars:
                dest_vars[k] = f(lhs_vars[k], rhs_vars[k])
            elif join in ["left", "outer"]:
                dest_vars[k] = f(lhs_vars[k], np.nan)
        for k in rhs_data_vars:
            if k not in dest_vars and join in ["right", "outer"]:
                dest_vars[k] = f(rhs_vars[k], np.nan)
        return dest_vars

    if utils.is_dict_like(other) and not isinstance(other, Dataset):
        # can't use our shortcut of doing the binary operation with
        # Variable objects, so apply over our data vars instead.
        new_data_vars = apply_over_both(self.data_vars, other,
                                        self.data_vars, other)
        return Dataset(new_data_vars)

    other_coords = getattr(other, 'coords', None)
    ds = self.coords.merge(other_coords)

    if isinstance(other, Dataset):
        new_vars = apply_over_both(self.data_vars, other.data_vars,
                                   self.variables, other.variables)
    else:
        other_variable = getattr(other, 'variable', other)
        new_vars = OrderedDict((k, f(self.variables[k], other_variable))
                               for k in self.data_vars)
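A sketch of the data-variable join behavior that apply_over_both encodes, using made-up variable names:

import xarray as xr

ds1 = xr.Dataset({"a": ("x", [1.0, 2.0]), "b": ("x", [3.0, 4.0])})
ds2 = xr.Dataset({"a": ("x", [10.0, 20.0])})

# the default arithmetic join is "inner": only shared data variables survive
print(list((ds1 + ds2).data_vars))  # ['a']

# with an "outer" join, the one-sided variable is filled with NaN
with xr.set_options(arithmetic_join="outer"):
    print((ds1 + ds2)["b"].values)  # [nan nan]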
def expand(self, key):
    """Parse key using xarray utils to ensure we have dimension names."""
    if not is_dict_like(key):
        labels = expanded_indexer(key, self.data_array.ndim)
        key = dict(zip(self.data_array.dims, labels))
    return key
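What the expansion produces can be sketched without the internal expanded_indexer helper; the dims and the bare key here are illustrative, and only the simple non-Ellipsis case is mimicked:

import numpy as np
import xarray as xr

da = xr.DataArray(np.zeros((2, 3)), dims=("x", "y"))

key = 0  # a bare positional key...
labels = np.index_exp[key] + (slice(None),) * (da.ndim - 1)
print(dict(zip(da.dims, labels)))  # {'x': 0, 'y': slice(None, None, None)}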
from typing import Any, Iterable, List, Mapping, Union

from xarray.core.utils import is_dict_like


def collect_dict_values(
    objects: Iterable[Union[Mapping, Any]], keys: Iterable, fill_value: object = None
) -> List[list]:
    return [
        [obj.get(key, fill_value) if is_dict_like(obj) else obj for obj in objects]
        for key in keys
    ]
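A usage sketch with invented inputs: scalars broadcast across every key, while mappings contribute per-key values.

values = collect_dict_values(
    objects=[{"a": 1}, 2, {"a": 3, "b": 4}],
    keys=["a", "b"],
    fill_value=0,
)
print(values)  # [[1, 2, 3], [0, 2, 4]]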