def _temp_comp_sub(self, usub=None):
    if usub is None:
        # Fall back to the units currently selected via the unit-selection stream.
        usub = self.strm_usub.usub
    if self._normalize:
        C, S = self.C_norm_sub, self.S_norm_sub
    else:
        C, S = self.C_sub, self.S_sub
    # Build one curve per trace type that is toggled on.
    cur_temp = dict()
    if self._showC:
        cur_temp['C'] = (
            hv.Dataset(C.sel(unit_id=usub)
                       .compute().rename("Intensity (A. U.)")
                       .dropna('frame', how='all')).to(hv.Curve, 'frame'))
    if self._showS:
        cur_temp['S'] = (
            hv.Dataset(S.sel(unit_id=usub)
                       .compute().rename("Intensity (A. U.)")
                       .dropna('frame', how='all')).to(hv.Curve, 'frame'))
    # Vertical cursor that follows the frame stream.
    cur_vl = (hv.DynamicMap(
        lambda f, y: hv.VLine(f) if f else hv.VLine(0),
        streams=[self.strm_f])
        .opts(style=dict(color='red')))
    # Empty curve acting as the source element for the frame stream.
    cur_cv = hv.Curve([], kdims=['frame'], vdims=['Intensity (A.U.)'])
    self.strm_f.source = cur_cv
    h_cv = len(self._w) // 8
    w_cv = len(self._w) * 2
    temp_comp = (cur_cv
                 * datashade_ndcurve(hv.HoloMap(cur_temp, 'trace')
                                     .collate().overlay('trace')
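The vertical-cursor pattern in the snippet above pairs an hv.DynamicMap with a Stream so the VLine is redrawn whenever the frame position changes. Below is a minimal, self-contained sketch of that technique; the Frame stream and its f parameter are hypothetical stand-ins for self.strm_f and are not part of the original codebase.

import holoviews as hv
hv.extension('bokeh')

# Hypothetical stand-in for self.strm_f: a stream carrying a single frame index 'f'.
Frame = hv.streams.Stream.define('Frame', f=0)
strm_f = Frame()

# Curve the cursor is drawn over (toy data).
cur_cv = hv.Curve(([0, 1, 2, 3], [0.0, 1.0, 0.5, 1.5]), 'frame', 'Intensity (A.U.)')

# The callback re-runs whenever strm_f fires, placing a vertical line at the current frame.
cur_vl = hv.DynamicMap(lambda f: hv.VLine(f if f else 0),
                       streams=[strm_f]).opts(hv.opts.VLine(color='red'))

overlay = cur_cv * cur_vl
strm_f.event(f=2)   # moves the cursor to frame 2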
def test_dataset_ndloc_index(self):
    xs, ys = np.linspace(0.12, 0.81, 10), np.linspace(0.12, 0.391, 5)
    arr = np.arange(10)*np.arange(5)[np.newaxis].T
    ds = Dataset((xs, ys, arr), kdims=['x', 'y'], vdims=['z'], datatype=[self.datatype])
    self.assertEqual(ds.ndloc[0, 0], arr[0, 0])
def test_dataset_groupby_drop_dims_with_vdim(self):
    array = np.random.rand(3, 20, 10)
    ds = Dataset({'x': range(10), 'y': range(20), 'z': range(3), 'Val': array, 'Val2': array*2},
                 kdims=['x', 'y', 'z'], vdims=['Val', 'Val2'])
    with DatatypeContext([self.datatype, 'dictionary', 'dataframe'], (ds, Dataset)):
        partial = ds.to(Dataset, kdims=['Val'], vdims=['Val2'], groupby='y')
    self.assertEqual(partial.last['Val'], array[:, -1, :].T.flatten())
def test_dataset_ndloc_lists(self):
    xs, ys = np.linspace(0.12, 0.81, 10), np.linspace(0.12, 0.391, 5)
    arr = np.arange(10)*np.arange(5)[np.newaxis].T
    ds = Dataset((xs, ys, arr), kdims=['x', 'y'], vdims=['z'], datatype=[self.datatype, 'dictionary'])
    # Expected result is the 3x3 sub-grid at those row/column positions.
    sliced = Dataset((xs[[1, 2, 3]], ys[[0, 1, 2]], arr[[0, 1, 2], :][:, [1, 2, 3]]),
                     kdims=['x', 'y'], vdims=['z'], datatype=['dictionary'])
    self.assertEqual(ds.ndloc[[0, 1, 2], [1, 2, 3]], sliced)
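The two ndloc tests above check positional (integer) indexing on a gridded Dataset: a pair of scalars returns a single value, while lists of positions select a sub-grid. A rough standalone illustration, using hypothetical coordinates rather than the test fixtures:

import numpy as np
import holoviews as hv

xs, ys = np.linspace(0, 9, 10), np.linspace(0, 4, 5)
arr = np.arange(10) * np.arange(5)[np.newaxis].T   # shape (5, 10): rows follow y, columns follow x
ds = hv.Dataset((xs, ys, arr), kdims=['x', 'y'], vdims=['z'])

ds.ndloc[0, 0]             # scalar: the z value at the first y and first x sample
ds.ndloc[[0, 1], [2, 3]]   # Dataset covering the 2x2 sub-grid at those row/column positions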
def test_dataset_extract_all_kdims_with_vdims_defined(self):
    df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
                      columns=['x', 'y', 'z'])
    ds = Dataset(df, vdims=['x'])
    self.assertEqual(ds.kdims, [Dimension('y'), Dimension('z')])
    self.assertEqual(ds.vdims, [Dimension('x')])
def test_dataset_aggregate_ht(self):
    aggregated = Dataset({'Gender': ['M', 'F'], 'Weight': [16.5, 10], 'Height': [0.7, 0.8]},
                         kdims=self.kdims[:1], vdims=self.vdims)
    self.compare_dataset(self.table.aggregate(['Gender'], np.mean), aggregated)
def test_dataset_dynamic_groupby_with_transposed_dimensions(self):
    dat = np.zeros((3, 5, 7))
    dataset = Dataset((range(7), range(5), range(3), dat), ['z', 'x', 'y'], 'value')
    grouped = dataset.groupby('z', kdims=['y', 'x'], dynamic=True)
    self.assertEqual(grouped[2].dimension_values(2, flat=False), dat[:, :, -1].T)
def test_dataset_scalar_groupby(self):
    ds = Dataset({'A': 1, 'B': np.arange(10)}, kdims=['A', 'B'])
    groups = ds.groupby('A')
    self.assertEqual(groups, HoloMap({1: Dataset({'B': np.arange(10)}, 'B')}, 'A'))
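The groupby tests above rely on Dataset.groupby returning a HoloMap of Datasets keyed on the grouped dimension (or a DynamicMap when dynamic=True is passed). A minimal sketch with hypothetical data, not the test fixtures:

import numpy as np
import holoviews as hv
from holoviews import Dataset

ds = Dataset({'A': [1, 1, 2, 2], 'B': [0, 1, 0, 1], 'Val': np.arange(4)},
             kdims=['A', 'B'], vdims=['Val'])

groups = ds.groupby('A')   # HoloMap keyed on 'A'; each element is a Dataset over the remaining kdim 'B'
groups[1]['Val']           # values of 'Val' for the A == 1 group, i.e. array([0, 1])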
def test_dataset_ndloc_lists_invert_xy(self):
    xs, ys = np.linspace(0.12, 0.81, 10), np.linspace(0.12, 0.391, 5)
    arr = np.arange(10)*np.arange(5)[np.newaxis].T
    ds = Dataset((xs[::-1], ys[::-1], arr), kdims=['x', 'y'], vdims=['z'], datatype=[self.datatype, 'dictionary'])
    # Expected result is the 3x3 sub-grid at those row/column positions of the inverted axes.
    sliced = Dataset((xs[::-1][[8, 7, 6]], ys[::-1][[4, 3, 2]], arr[[4, 3, 2], :][:, [8, 7, 6]]),
                     kdims=['x', 'y'], vdims=['z'], datatype=['dictionary'])
    self.assertEqual(ds.ndloc[[0, 1, 2], [1, 2, 3]], sliced)
def init_column_data(self):
    import dask.array
    self.xs = np.array(range(11))
    self.xs_2 = self.xs**2
    self.y_ints = self.xs*2
    # Wrap the y column in a chunked dask array (chunk size 2).
    dask_y = dask.array.from_array(np.array(self.y_ints), 2)
    self.dataset_hm = Dataset((self.xs, dask_y),
                              kdims=['x'], vdims=['y'])
    self.dataset_hm_alias = Dataset((self.xs, dask_y),
                                    kdims=[('x', 'X')], vdims=[('y', 'Y')])
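init_column_data above builds its fixtures from a dask array, so the y column is chunked and lazily evaluated. A minimal sketch of the same construction, assuming dask is installed; which holoviews data interface ends up holding the column depends on the installed backends:

import numpy as np
import dask.array as da
from holoviews import Dataset

xs = np.arange(11)
dask_y = da.from_array(xs * 2, chunks=2)   # lazily chunked value column

# In-memory key column paired with a dask-backed value column, as in the fixture above.
ds = Dataset((xs, dask_y), kdims=['x'], vdims=['y'])
ds.dimension_values('y')                   # returns the stored 'y' column values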