def predict(self, X):
    predictions = self.model.predict(X)
    return reshape(predictions, (-1, 1)) if self.needs_unravel else predictions
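
# For context: a minimal sketch of the kind of wrapper such a predict method could live in.
# The class name, constructor, and fit logic below are assumptions added for illustration;
# only the predict body mirrors the snippet above. The idea is that some estimators require
# a 1-D target, so the wrapper ravels y before fitting and restores the n×1 column shape
# on the way out.
import numpy as np
from numpy import ndim, reshape, shape


class _ColumnRestoringModel:
    def __init__(self, model):
        self.model = model
        self.needs_unravel = False

    def fit(self, X, y):
        if ndim(y) == 2 and shape(y)[1] == 1:
            # the wrapped model wants a flat target; remember to restore the column later
            y = np.ravel(y)
            self.needs_unravel = True
        self.model.fit(X, y)
        return self

    def predict(self, X):
        predictions = self.model.predict(X)
        return reshape(predictions, (-1, 1)) if self.needs_unravel else predictions

# Usage sketch (estimator choice is arbitrary):
#     m = _ColumnRestoringModel(LinearRegression()).fit(X, y)
#     m.predict(X) has shape (len(X), 1) whenever y was an n×1 column
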
func=(lambda T:
      self._one_hot_encoder.transform(
          reshape(self._label_encoder.transform(T.ravel()), (-1, 1)))[:, 1:]),
validate=False)
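
# Standalone sketch of what the lambda above computes: label-encode the raw treatments,
# one-hot encode the integer codes, then drop the first indicator column so the first
# category serves as the baseline. The encoder setup below is an assumption for
# illustration (sparse_output requires scikit-learn >= 1.2; older versions use sparse=False);
# only the final transform expression mirrors the snippet.
import numpy as np
from numpy import reshape
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

T = np.array(['b', 'a', 'c', 'a'])
label_encoder = LabelEncoder().fit(T.ravel())
one_hot_encoder = OneHotEncoder(sparse_output=False).fit(
    reshape(label_encoder.transform(T.ravel()), (-1, 1)))

dummies = one_hot_encoder.transform(
    reshape(label_encoder.transform(T.ravel()), (-1, 1)))[:, 1:]
print(dummies)
# [[1. 0.]
#  [0. 0.]
#  [0. 1.]
#  [0. 0.]]
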
the corresponding singleton dimensions in the output will be collapsed
(e.g. if both are vectors, then the output of this method will also be a vector)
"""
if X is None:
X = np.empty((shape(T)[0], 0))
assert shape(T)[0] == shape(X)[0]
ft_X = self._x_featurizer.fit_transform(X)
n = shape(T)[0]
dT = self._dt_featurizer.fit_transform(T)
W = np.zeros((n, self._d_w))
# dT should be an n×dₜ×fₜ array (but if T was a vector, or if there is only one feature,
# dT may be only 2-dimensional)
# promote dT to 3D if necessary (e.g. if T was a vector)
if ndim(dT) < 3:
    dT = reshape(dT, (n, 1, shape(dT)[1]))
# reshape ft_X and dT to allow cross product (result has shape n×dₜ×fₜ×f_x)
features = reshape(ft_X, (n, 1, 1, -1)) * reshape(dT, shape(dT) + (1,))
features = transpose(features, [0, 1, 3, 2]) # swap last two dims to match cross_product
features = reshape(features, (size(T), -1))
output = self._model_Y.predict(_add_zeros(np.hstack([W, features])))
return reshape(output, shape(T) + (shape(output)[-1],))
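
# A small numerical sketch of the broadcasting trick above: reshaping ft_X to (n, 1, 1, f_x)
# and dT to (n, d_t, f_t, 1) and multiplying yields every product dT[i, a, b] * ft_X[i, c]
# in one (n, d_t, f_t, f_x) array, which after the transpose/reshape is a row-wise
# Kronecker product of the X features with the corresponding slice of dT.
# The sizes and random inputs below are made up purely for illustration.
import numpy as np
from numpy import reshape, shape, transpose

n, d_t, f_t, f_x = 4, 2, 3, 5
rng = np.random.default_rng(0)
ft_X = rng.normal(size=(n, f_x))
dT = rng.normal(size=(n, d_t, f_t))

features = reshape(ft_X, (n, 1, 1, -1)) * reshape(dT, shape(dT) + (1,))
features = transpose(features, [0, 1, 3, 2])       # (n, d_t, f_x, f_t)
features = reshape(features, (n * d_t, -1))        # one row per (sample, treatment) pair

i, a = 1, 0                                        # pick a sample and a treatment column
expected = np.kron(ft_X[i], dT[i, a])              # f_x varies slowest, f_t fastest
assert np.allclose(features[i * d_t + a], expected)
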
    columns.append(cross_product(*[self._column_feats(X[:, i], indices.count(i))
                                   for i in range(shape(X)[1])]))
else:
    indices = set(indices)
    if self._shift == 0:  # return features for all columns:
        columns.append(np.hstack([self._column_feats(X[:, i], self._shift) for i in range(shape(X)[1])]))
    # columns are featurized independently; partial derivatives are only non-zero
    # when taken with respect to the same column each time
    elif len(indices) == 1:
        index = list(indices)[0]
        feats = self._column_feats(X[:, index], self._shift)
        columns.append(np.hstack([feats if i == index else np.zeros(shape(feats))
                                  for i in range(shape(X)[1])]))
    else:
        columns.append(np.zeros((n, (self._degree + 1) * ncols)))
return reshape(np.hstack(columns), (n,) + (ncols,) * self._shift + (-1,))
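
# Illustration of the "columns are featurized independently" remark above, using plain
# polynomial features as a stand-in for _column_feats (which in the original presumably
# produces per-column basis features and their derivatives). All names and sizes below
# are assumptions for the sketch, not the original implementation.
import numpy as np


def _poly_feats(x, degree, n_derivs):
    # columns are the basis functions 1, x, ..., x**degree, differentiated n_derivs times
    feats = np.vander(x, degree + 1, increasing=True)
    k = np.arange(degree + 1)
    for _ in range(n_derivs):
        # d/dx x**k = k * x**(k-1); keep one column per basis function
        feats = np.hstack([np.zeros((len(x), 1)), feats[:, :-1]]) * k
    return feats


X = np.array([[0.5, 2.0],
              [1.0, 3.0]])
degree, shift = 2, 1                 # first-order partial derivatives
n, ncols = X.shape

# Differentiating w.r.t. column `index` only affects that column's features; every other
# column's block is zero, mirroring the elif branch above.
blocks = []
for index in range(ncols):
    feats = _poly_feats(X[:, index], degree, shift)
    blocks.append(np.hstack([feats if i == index else np.zeros(feats.shape)
                             for i in range(ncols)]))
result = np.hstack(blocks).reshape((n,) + (ncols,) * shift + (-1,))
print(result.shape)                  # (2, 2, 6): sample x differentiated column x features
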