Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
-----
This method requires a dataset of equal-sized time series
Examples
--------
>>> time_series = [[1, 2, 3, 4], [1, 2, 4, 5]]
>>> bar = euclidean_barycenter(time_series)
>>> bar.shape
(4, 1)
>>> bar
array([[1. ],
[2. ],
[3.5],
[4.5]])
"""
X_ = to_time_series_dataset(X)
weights = _set_weights(weights, X_.shape[0])
return numpy.average(X_, axis=0, weights=weights)
def prepare_transform(self, ts_to_be_rescaled):
    """Compute and store DTW alignment paths between the reference series
    and the series to be rescaled, for reuse at transform time.

    If ``ts_to_be_rescaled`` holds a single time series, every series
    passed at transform time is rescaled with that single alignment path;
    otherwise the ``X`` given to ``transform`` must contain the same
    number of series (``X.shape[0]``) as ``ts_to_be_rescaled``.

    Parameters
    ----------
    ts_to_be_rescaled : numpy.ndarray
        A time series dataset of base modalities of shape (n_ts, sz, d)
        with ``d = self.reference_series_.shape[-1]``.
    """
    dataset = to_time_series_dataset(ts_to_be_rescaled)
    # dataset now has shape (n_ts, sz, d)
    # with d = self.reference_series_.shape[-1]
    self.saved_dtw_paths_ = []
    for series in dataset:
        # Drop trailing padding (NaN/inf) before resampling.
        valid_len = first_non_finite_index(series)
        resampled = _resampled(series[:valid_len],
                               n_samples=self.n_samples,
                               kind=self.interp_kind)
        if self.metric == "dtw":
            path, dist = dtw_path(self.reference_series_, resampled)
        elif self.metric == "lrdtw":
            path, dist = lr_dtw_path(self.reference_series_, resampled,
                                     gamma=self.gamma_lr_dtw)
        else:
            raise ValueError("Unknown alignment function")
        self.saved_dtw_paths_.append(path)
def support_vectors_time_series_(self, X):
    """Return the support vectors as time series, grouped per class.

    Parameters
    ----------
    X : array-like of shape=(n_ts, sz, d)
        The time series dataset the underlying SVM was fitted on.

    Returns
    -------
    list of numpy.ndarray
        One array of support-vector time series per class.
    """
    X_ = to_time_series_dataset(X)
    support_per_class = []
    offset = 0
    # svm_estimator_.support_ stores support-vector indices grouped by
    # class; n_support_ gives the size of each per-class group.
    for count in self.svm_estimator_.n_support_:
        class_indices = self.svm_estimator_.support_[offset:offset + count]
        support_per_class.append(X_[class_indices])
        offset += count
    return support_per_class
def predict_proba(self, X):
    """Predict class probability for a given set of time series.

    Parameters
    ----------
    X : array-like of shape=(n_ts, sz, d)
        Time series dataset.

    Returns
    -------
    array of shape=(n_ts, n_classes),
        Class probability matrix.
    """
    check_is_fitted(self, '_X_fit')
    X = check_array(X, allow_nd=True)
    X = to_time_series_dataset(X)
    X = check_dims(X, self._X_fit)
    n_ts, sz, _ = X.shape
    # The underlying network expects one univariate input per dimension.
    per_dim_inputs = [X[:, :, di].reshape((n_ts, sz, 1))
                      for di in range(self.d_)]
    probs = self.model_.predict(per_dim_inputs,
                                batch_size=self.batch_size,
                                verbose=self.verbose)
    # A single sigmoid output on a binary problem: expand to two columns
    # so the result always has one column per class.
    if probs.shape[1] == 1 and len(self.classes_) == 2:
        probs = numpy.hstack((1 - probs, probs))
    return probs
soft_dtw : Compute Soft-DTW
cdist_soft_dtw_normalized : Cross similarity matrix between time series
datasets using a normalized version of Soft-DTW
References
----------
.. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
Time-Series," ICML 2017.
"""
dataset1 = to_time_series_dataset(dataset1, dtype=numpy.float64)
self_similarity = False
if dataset2 is None:
dataset2 = dataset1
self_similarity = True
else:
dataset2 = to_time_series_dataset(dataset2, dtype=numpy.float64)
dists = numpy.empty((dataset1.shape[0], dataset2.shape[0]))
equal_size_ds1 = check_equal_size(dataset1)
equal_size_ds2 = check_equal_size(dataset2)
for i, ts1 in enumerate(dataset1):
if equal_size_ds1:
ts1_short = ts1
else:
ts1_short = ts1[:ts_size(ts1)]
for j, ts2 in enumerate(dataset2):
if equal_size_ds2:
ts2_short = ts2
else:
ts2_short = ts2[:ts_size(ts2)]
if self_similarity and j < i:
dists[i, j] = dists[j, i]
else:
def transform(self, X, y=None):
    """Transform a dataset of time series into its SAX representation.

    Parameters
    ----------
    X : array-like of shape (n_ts, sz, d)
        Time series dataset
    y
        Ignored; kept for scikit-learn API consistency.

    Returns
    -------
    numpy.ndarray of integers with shape (n_ts, n_segments, d)
        SAX-Transformed dataset
    """
    dataset = to_time_series_dataset(X)
    return self._transform(dataset, y)
if dataset2 is None:
# Inspired from code by @GillesVandewiele:
# https://github.com/rtavenar/tslearn/pull/128#discussion_r314978479
matrix = numpy.zeros((len(dataset1), len(dataset1)))
indices = numpy.triu_indices(len(dataset1), k=1, m=len(dataset1))
matrix[indices] = Parallel(n_jobs=n_jobs, prefer="threads")(
delayed(dtw)(
dataset1[i], dataset1[j],
global_constraint=global_constraint,
sakoe_chiba_radius=sakoe_chiba_radius,
itakura_max_slope=itakura_max_slope)
for i in range(len(dataset1)) for j in range(i + 1, len(dataset1))
)
return matrix + matrix.T
else:
dataset2 = to_time_series_dataset(dataset2)
matrix = Parallel(n_jobs=n_jobs, prefer="threads")(
delayed(dtw)(
dataset1[i], dataset2[j],
global_constraint=global_constraint,
sakoe_chiba_radius=sakoe_chiba_radius,
itakura_max_slope=itakura_max_slope)
for i in range(len(dataset1)) for j in range(len(dataset2))
)
return numpy.array(matrix).reshape((len(dataset1), -1))
def transform(self, X, y=None):
    """Transform a dataset of time series into its 1d-SAX representation.

    Parameters
    ----------
    X : array-like of shape (n_ts, sz, d)
        Time series dataset
    y
        Ignored; kept for scikit-learn API consistency.

    Returns
    -------
    numpy.ndarray of integers with shape (n_ts, n_segments, 2 * d)
        1d-SAX-Transformed dataset
    """
    dataset = to_time_series_dataset(X)
    return self._transform(dataset, y)
... [[1, 2, 2, 3], [1., 2., 3., 4.]], gamma=.01)
array([[-0.01098612, 1. ],
[ 1. , 0. ]])
See Also
--------
soft_dtw : Compute Soft-DTW
cdist_soft_dtw_normalized : Cross similarity matrix between time series
datasets using a normalized version of Soft-DTW
References
----------
.. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
Time-Series," ICML 2017.
"""
dataset1 = to_time_series_dataset(dataset1, dtype=numpy.float64)
self_similarity = False
if dataset2 is None:
dataset2 = dataset1
self_similarity = True
else:
dataset2 = to_time_series_dataset(dataset2, dtype=numpy.float64)
dists = numpy.empty((dataset1.shape[0], dataset2.shape[0]))
equal_size_ds1 = check_equal_size(dataset1)
equal_size_ds2 = check_equal_size(dataset2)
for i, ts1 in enumerate(dataset1):
if equal_size_ds1:
ts1_short = ts1
else:
ts1_short = ts1[:ts_size(ts1)]
for j, ts2 in enumerate(dataset2):
if equal_size_ds2:
def _prepare_ts_datasets_sklearn(X):
    """Prepare time series datasets for sklearn.

    Flattens each (sz, d) series into a single feature vector so the
    dataset can be fed to standard sklearn estimators.

    Parameters
    ----------
    X : array-like
        Time series dataset coercible by ``to_time_series_dataset``.

    Returns
    -------
    numpy.ndarray of shape (n_ts, sz * d)
        2d array suitable for sklearn estimators.

    Examples
    --------
    >>> X = to_time_series_dataset([[1, 2, 3], [2, 2, 3]])
    >>> _prepare_ts_datasets_sklearn(X).shape
    (2, 3)
    """
    sklearn_X = to_time_series_dataset(X)
    # Collapse the (sz, d) axes; the leading n_ts axis is preserved.
    # (The previous version unpacked sz and d without ever using them.)
    return sklearn_X.reshape((sklearn_X.shape[0], -1))