def fit(self, X, y, sample_weight=None):
    self.needs_unravel = False
    if ndim(y) == 2 and shape(y)[1] > 1:
        self.model = WeightedMultiTaskLassoCV(*self.args, **self.kwargs)
    else:
        if ndim(y) == 2 and shape(y)[1] == 1:
            y = np.ravel(y)
            self.needs_unravel = True
        self.model = WeightedLassoCV(*self.args, **self.kwargs)
    self.model.fit(X, y, sample_weight)
    # mirror the fitted attributes of the underlying model on the wrapper
    self.intercept_ = self.model.intercept_
    self.coef_ = self.model.coef_
    self.alpha_ = self.model.alpha_
    self.alphas_ = self.model.alphas_
    self.n_iter_ = self.model.n_iter_
    return self
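# A minimal usage sketch of the dispatch above, assuming this is econml's
# WeightedLassoCVWrapper; the import path below is an assumption.
import numpy as np
from econml.sklearn_extensions.linear_model import WeightedLassoCVWrapper  # assumed path

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))
y = X @ np.array([1.0, 0.0, 0.0, 2.0, 0.0]) + 0.1 * rng.normal(size=100)
w = rng.uniform(0.5, 1.5, size=100)

est = WeightedLassoCVWrapper()
est.fit(X, y.reshape(-1, 1), sample_weight=w)           # (100, 1) target: raveled, single-task path
print(est.coef_.shape)                                  # (5,)
est.fit(X, np.column_stack([y, -y]), sample_weight=w)   # 2-column target: multi-task path
print(est.coef_.shape)                                  # (2, 5)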
def effect(self, X=None, *, T0=0, T1=1):
    """
    Calculate the heterogeneous treatment effect τ(X, T0, T1).

    The effect is calculated between the two treatment points T0 and T1,
    conditional on the features X, for each of the m test samples.

    Parameters
    ----------
    T0: (m × dₜ) matrix or vector of length m
        Base treatments for each sample
    T1: (m × dₜ) matrix or vector of length m
        Target treatments for each sample
    X: optional (m × dₓ) matrix
        Features for each sample

    Returns
    -------
    τ: (m × d_y) matrix (or length m vector if Y was a vector)
        Heterogeneous treatment effects on each outcome for each sample.
        Note that when Y is a vector rather than a 2-dimensional array, the
        corresponding singleton dimension will be collapsed (so this method
        will return a vector)
    """
# TODO: what if input is sparse? - there's no equivalent to einsum,
# but tensordot can't be applied to this problem because we don't sum over m
# TODO: if T0 or T1 are scalars, we'll promote them to vectors;
# should it be possible to promote them to 2D arrays if that's what we saw during training?
eff = self.const_marginal_effect(X)
m = shape(eff)[0]
if ndim(T0) == 0:
T0 = np.repeat(T0, m)
if ndim(T1) == 0:
T1 = np.repeat(T1, m)
dT = T1 - T0
einsum_str = 'myt,mt->my'
if ndim(dT) == 1:
einsum_str = einsum_str.replace('t', '')
if ndim(eff) == ndim(dT): # y is a vector, rather than a 2D array
einsum_str = einsum_str.replace('y', '')
return np.einsum(einsum_str, eff, dT)
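# To make the einsum bookkeeping above concrete: a standalone numpy sketch
# (shapes invented for illustration) of how the contraction string degenerates
# as the treatment and outcome axes collapse.
import numpy as np

m, d_y, d_t = 4, 2, 3
eff3 = np.random.normal(size=(m, d_y, d_t))    # per-sample const marginal effects
dT2 = np.ones((m, d_t))                        # T1 - T0 for a multi-dimensional treatment
tau = np.einsum('myt,mt->my', eff3, dT2)       # sum over t, keep samples and outcomes
assert tau.shape == (m, d_y)

eff2 = np.random.normal(size=(m, d_y))         # T was a vector: 't' drops from both operands
tau2 = np.einsum('my,m->my', eff2, np.ones(m))
assert tau2.shape == (m, d_y)                  # if Y were also a vector: 'm,m->m'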
def effect_interval(self, X, *, T0, T1, alpha=0.1):
# We can write effect interval as a function of const_marginal_effect_interval for a single treatment
X, T0, T1 = self._est._expand_treatments(X, T0, T1)
lb_pre, ub_pre = self.const_marginal_effect_interval(X, alpha=alpha)
dT = T1 - T0
einsum_str = 'myt,mt->my'
if ndim(dT) == 1:
einsum_str = einsum_str.replace('t', '')
if ndim(lb_pre) == ndim(dT): # y is a vector, rather than a 2D array
einsum_str = einsum_str.replace('y', '')
intrv_pre = np.array([np.einsum(einsum_str, lb_pre, dT), np.einsum(einsum_str, ub_pre, dT)])
lb = np.min(intrv_pre, axis=0)
ub = np.max(intrv_pre, axis=0)
return lb, ub
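# Why the min/max at the end: a negative treatment change flips which einsum
# product is the lower bound. A scalar sketch of the idea:
import numpy as np

lb_pre, ub_pre = 1.0, 2.0                          # pointwise bounds on the marginal effect
dT = -1.0                                          # negative treatment change
candidates = np.array([lb_pre * dT, ub_pre * dT])  # [-1.0, -2.0]: order flipped
lb, ub = candidates.min(axis=0), candidates.max(axis=0)
print(lb, ub)                                      # -2.0 -1.0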
def _column_feats(self, X, shift):
"""
Apply Hermite function evaluations of degrees 0..`degree` differentiated `shift` times.
When applied to a column `X` of shape (n,), the resulting array has shape (n, degree + 1).
"""
assert ndim(X) == 1
# this will have dimension (d,) + shape(X)
coeffs = np.identity(self._degree + shift + 1)[:, shift:]
feats = ((-1) ** shift) * hermeval(X, coeffs) * np.exp(-X * X / 2)
# send the first dimension to the end
return transpose(feats)
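# For intuition, a minimal standalone sketch of these features for shift=0,
# assuming hermeval above is numpy's probabilists' Hermite evaluator
# (numpy.polynomial.hermite_e.hermeval).
import numpy as np
from numpy.polynomial.hermite_e import hermeval

degree, shift = 2, 0
x = np.linspace(-1.0, 1.0, 5)
coeffs = np.identity(degree + shift + 1)[:, shift:]   # column k holds the He_k series
feats = ((-1) ** shift) * hermeval(x, coeffs) * np.exp(-x * x / 2)
print(np.transpose(feats).shape)                      # (5, 3): one column per He_0..He_2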
def effect(self, X=None, *, T0=0, T1=1):
    """
    Calculate the heterogeneous treatment effect τ(X, T0, T1).

    Parameters
    ----------
    T0: (m × dₜ) matrix or vector of length m
        Base treatments for each sample
    T1: (m × dₜ) matrix or vector of length m
        Target treatments for each sample
    X: optional (m × dₓ) matrix
        Features for each sample

    Returns
    -------
    τ: (m × d_y) matrix (or length m vector if Y was a vector)
        Heterogeneous treatment effects on each outcome for each sample.
        Note that when Y is a vector rather than a 2-dimensional array, the
        corresponding singleton dimension will be collapsed (so this method
        will return a vector)
    """
if ndim(T0) == 0:
T0 = np.full((1 if X is None else shape(X)[0], self._d_t), T0)
if ndim(T1) == 0:
T1 = np.full((1 if X is None else shape(X)[0], self._d_t), T1)
if X is None:
X = np.empty((shape(T0)[0], 0))
assert shape(T0) == shape(T1)
assert shape(T0)[0] == shape(X)[0]
W = np.zeros((shape(T0)[0], self._d_w)) # can set arbitrarily since values will cancel
ft_X = self._x_featurizer.fit_transform(X)
ft_T0 = self._t_featurizer.fit_transform(T0)
ft_T1 = self._t_featurizer.fit_transform(T1)
Y0 = self._model_Y.predict(_add_ones(np.hstack([W, cross_product(ft_T0, ft_X)])))
Y1 = self._model_Y.predict(_add_ones(np.hstack([W, cross_product(ft_T1, ft_X)])))
return Y1 - Y0
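# The "values will cancel" trick above relies on the fitted model being linear
# in the W block: predict([W, f1]) - predict([W, f0]) is independent of W.
# A quick check with a stand-in sklearn model (not the repo's actual _model_Y):
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
model = LinearRegression().fit(rng.normal(size=(50, 4)), rng.normal(size=50))
f0, f1 = rng.normal(size=(1, 2)), rng.normal(size=(1, 2))
for W in [np.zeros((1, 2)), rng.normal(size=(1, 2))]:
    y0 = model.predict(np.hstack([W, f0]))
    y1 = model.predict(np.hstack([W, f1]))
    print(y1 - y0)                             # identical for both choices of W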
"""
# TODO: what if input is sparse? - there's no equivalent to einsum,
# but tensordot can't be applied to this problem because we don't sum over m
# TODO: if T0 or T1 are scalars, we'll promote them to vectors;
# should it be possible to promote them to 2D arrays if that's what we saw during training?
eff = self.const_marginal_effect(X)
m = shape(eff)[0]
if ndim(T0) == 0:
T0 = np.repeat(T0, m)
if ndim(T1) == 0:
T1 = np.repeat(T1, m)
dT = T1 - T0
einsum_str = 'myt,mt->my'
if ndim(dT) == 1:
einsum_str = einsum_str.replace('t', '')
if ndim(eff) == ndim(dT): # y is a vector, rather than a 2D array
einsum_str = einsum_str.replace('y', '')
return np.einsum(einsum_str, eff, dT)
Features for each sample
Returns
-------
τ: (m × d_y) matrix (or length m vector if Y was a vector)
Heterogeneous treatment effects on each outcome for each sample.
Note that when Y is a vector rather than a 2-dimensional array, the corresponding
singleton dimension will be collapsed (so this method will return a vector)
"""
# TODO: what if input is sparse? - there's no equivalent to einsum,
# but tensordot can't be applied to this problem because we don't sum over m
# TODO: if T0 or T1 are scalars, we'll promote them to vectors;
# should it be possible to promote them to 2D arrays if that's what we saw during training?
eff = self.const_marginal_effect(X)
m = shape(eff)[0]
if ndim(T0) == 0:
T0 = np.repeat(T0, m)
if ndim(T1) == 0:
T1 = np.repeat(T1, m)
dT = T1 - T0
einsum_str = 'myt,mt->my'
if ndim(dT) == 1:
einsum_str = einsum_str.replace('t', '')
if ndim(eff) == ndim(dT): # y is a vector, rather than a 2D array
einsum_str = einsum_str.replace('y', '')
return np.einsum(einsum_str, eff, dT)
def marginal_effect(self, T, X=None):
    """
    Calculate the heterogeneous marginal effect around the base treatments T
    for each of the m test samples.

    Parameters
    ----------
    T: (m × dₜ) matrix or vector of length m
        Base treatments for each sample
    X: optional (m × dₓ) matrix
        Features for each sample

    Returns
    -------
    grad_tau: array
        Heterogeneous marginal effects on each outcome for each sample.
        Note that when Y or T is a vector rather than a 2-dimensional array,
        the corresponding singleton dimensions in the output will be collapsed
        (e.g. if both are vectors, then the output of this method will also be
        a vector)
    """
if X is None:
X = np.empty((shape(T)[0], 0))
assert shape(T)[0] == shape(X)[0]
ft_X = self._x_featurizer.fit_transform(X)
n = shape(T)[0]
dT = self._dt_featurizer.fit_transform(T)
W = np.zeros((n, self._d_w))
# dT should be an n×dₜ×fₜ array (but if T was a vector, or if there is only one feature,
# dT may be only 2-dimensional)
# promote dT to 3D if necessary (e.g. if T was a vector)
if ndim(dT) < 3:
dT = reshape(dT, (n, 1, shape(dT)[1]))
# reshape ft_X and dT to allow cross product (result has shape n×dₜ×fₜ×f_x)
features = reshape(ft_X, (n, 1, 1, -1)) * reshape(dT, shape(dT) + (1,))
features = transpose(features, [0, 1, 3, 2]) # swap last two dims to match cross_product
features = reshape(features, (size(T), -1))
output = self._model_Y.predict(_add_zeros(np.hstack([W, features])))
return reshape(output, shape(T) + (shape(output)[-1],))
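# The broadcasting above is easy to get wrong; this standalone numpy sketch
# (with invented sizes) checks the intermediate shapes the comments claim.
import numpy as np

n, d_t, f_t, f_x = 2, 3, 4, 5
ft_X = np.random.normal(size=(n, f_x))
dT = np.random.normal(size=(n, d_t, f_t))
features = ft_X.reshape(n, 1, 1, -1) * dT.reshape(dT.shape + (1,))
assert features.shape == (n, d_t, f_t, f_x)    # cross product via broadcasting
features = features.transpose(0, 1, 3, 2)      # swap last two dims, as in the code
flat = features.reshape(n * d_t, -1)           # size(T) rows when T is (n, d_t)
assert flat.shape == (n * d_t, f_x * f_t)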