Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
Returns
-------
y_proba : numpy ndarray
"""
y_probas = []
for yp in self.forward_iter(X, training=False):
yp = yp[0] if isinstance(yp, tuple) else yp
y_probas.append(to_numpy(yp))
y_proba = np.concatenate(y_probas, 0)
return y_proba
def __init__(self, scoring, lower_is_better=True, on_train=False,
             name=None, target_extractor=to_numpy):
    """Set up the scoring callback.

    ``__init__`` does no work beyond storing its arguments verbatim
    on the instance; all resolution happens later.
    """
    self.scoring = scoring
    self.lower_is_better = lower_is_better
    self.on_train = on_train
    self.name = name
    self.target_extractor = target_extractor
def check_cv(self, y):
    """Resolve which cross validation strategy is used."""
    target = None
    if self.stratified:
        # Stratified splitting wants a numpy target; if conversion is
        # unsupported for this type, hand the raw target through and
        # let the downstream check deal with it.
        try:
            target = to_numpy(y)
        except (AttributeError, TypeError):
            target = y
    uses_float_cv = self._is_float(self.cv)
    return self._check_cv_float() if uses_float_cv else self._check_cv_non_float(target)
def __init__(self, scoring, lower_is_better=True, on_train=False,
             name=None, target_extractor=to_numpy):
    """Store the configuration for this scoring callback.

    No validation or computation happens here; each attribute mirrors
    its constructor argument one-to-one.
    """
    self.target_extractor = target_extractor
    self.name = name
    self.on_train = on_train
    self.lower_is_better = lower_is_better
    self.scoring = scoring
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
Returns
-------
y_proba : numpy ndarray
"""
y_probas = []
for yp in self.forward_iter(X, training=False):
yp = yp[0] if isinstance(yp, tuple) else yp
y_probas.append(to_numpy(yp))
y_proba = np.concatenate(y_probas, 0)
return y_proba
Returns
-------
y_proba : numpy ndarray
"""
y_probas = []
self.check_is_fitted(attributes=['criterion_'])
bce_logits_loss = isinstance(
self.criterion_, torch.nn.BCEWithLogitsLoss)
for yp in self.forward_iter(X, training=False):
yp = yp[0] if isinstance(yp, tuple) else yp
if bce_logits_loss:
yp = torch.sigmoid(yp)
y_probas.append(to_numpy(yp))
y_proba = np.concatenate(y_probas, 0)
y_proba = np.stack((1 - y_proba, y_proba), axis=1)
return y_proba
def __init__(self, scoring, lower_is_better=True, on_train=False,
             name=None, target_extractor=to_numpy, use_caching=True):
    """Configure the scoring callback.

    Arguments are assigned to same-named attributes unchanged;
    ``use_caching`` additionally controls whether cached predictions
    are reused when scoring.
    """
    self.scoring = scoring
    self.lower_is_better = lower_is_better
    self.on_train = on_train
    self.name = name
    self.target_extractor = target_extractor
    self.use_caching = use_caching
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
Returns
-------
y_pred : numpy ndarray
"""
y_preds = []
for yp in self.forward_iter(X, training=False):
yp = yp[0] if isinstance(yp, tuple) else yp
y_preds.append(to_numpy(yp.max(-1)[-1]))
y_pred = np.concatenate(y_preds, 0)
return y_pred
def score(self, X, y=None):
    """Return the micro-averaged F1 score of the model's predictions on ``X``.

    Parameters
    ----------
    X : input data
      Passed to ``self.predict`` (and, when ``y`` is not given, to
      ``self.get_dataset`` to recover the targets).
    y : target data, optional
      Ground-truth labels. Previously this argument was silently
      ignored; it is now used directly when provided, which also
      avoids a full extra pass over the data iterator.

    Returns
    -------
    float
      ``f1_score(y_true, y_pred, average='micro')``.
    """
    if y is None:
        # No explicit targets: pull them batch by batch out of the
        # dataset constructed from X.
        ds = self.get_dataset(X)
        batches = self.get_iterator(ds, training=False)
        y_true = np.concatenate(
            [skorch.utils.to_numpy(yb) for _, yb in batches])
    else:
        y_true = skorch.utils.to_numpy(y)
    y_pred = self.predict(X)
    return f1_score(y_true, y_pred, average='micro')