# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of an ensemble-classifier fit() body -- the enclosing
# `def` is outside this chunk and the original indentation appears to have been
# stripped, so the nesting below is inferred; confirm against the full source.
if not self.refit:
# refit disabled: reuse the classifier instances as provided (the list is
# shallow-copied, so the estimators themselves may already be fitted).
self.clfs_ = [clf for clf in self.clfs]
else:
# refit enabled: fit unfitted clones so the caller's estimators are left
# untouched.
self.clfs_ = [clone(clf) for clf in self.clfs]
if self.verbose > 0:
print("Fitting %d classifiers..." % (len(self.clfs)))
for clf in self.clfs_:
if self.verbose > 0:
# 1-based position of the current classifier, for progress output only.
# NOTE(review): list.index() is O(n) per iteration and misbehaves if the
# same estimator object appears twice; enumerate() would be safer.
i = self.clfs_.index(clf) + 1
print("Fitting clf%d: %s (%d/%d)" %
(i, _name_estimators((clf,))[0][0], i,
len(self.clfs_)))
if self.verbose > 2:
# Forward a reduced verbosity level to estimators that support it.
if hasattr(clf, 'verbose'):
clf.set_params(verbose=self.verbose - 2)
if self.verbose > 1:
print(_name_estimators((clf,))[0][1])
# Labels go through self.le_ -- presumably a LabelEncoder fitted earlier in
# fit(); not visible in this chunk, verify against the full method.
if sample_weight is None:
clf.fit(X, self.le_.transform(y))
else:
# Only forward sample_weight when supplied; some estimators reject the kwarg.
clf.fit(X, self.le_.transform(y),
sample_weight=sample_weight)
return self
# NOTE(review): fragment of a stacking-classifier fit() body; the enclosing
# `def` is not visible and indentation is lost. It reads both
# `self.classifiers` and `self.clfs_` -- verify both attributes exist on the
# owning class (the mix suggests chunks from different classes were merged).
if self.verbose > 0:
print("Fitting %d classifiers..." % (len(self.classifiers)))
for clf in self.clfs_:
if self.verbose > 0:
# 1-based position for progress output; list.index() is O(n) per pass
# and breaks on duplicate estimator objects -- not changed here.
i = self.clfs_.index(clf) + 1
print("Fitting classifier%d: %s (%d/%d)" %
(i, _name_estimators((clf,))[0][0], i, len(self.clfs_)))
if self.verbose > 2:
# Forward a reduced verbosity level to estimators that support it.
if hasattr(clf, 'verbose'):
clf.set_params(verbose=self.verbose - 2)
if self.verbose > 1:
print(_name_estimators((clf,))[0][1])
if sample_weight is None:
clf.fit(X, y)
else:
clf.fit(X, y, sample_weight=sample_weight)
# Level-one predictions become the training input for the meta-estimator.
meta_features = self.predict_meta_features(X)
if self.store_train_meta_features:
# Optionally cache the level-one outputs for later inspection.
self.train_meta_features_ = meta_features
if not self.use_features_in_secondary:
pass
elif sparse.issparse(X):
# Preserve sparsity when stacking the original features next to the
# meta-features.
meta_features = sparse.hstack((X, meta_features))
else:
meta_features = np.hstack((X, meta_features))
# NOTE(review): fragment of a stacking-regressor fit() body; the enclosing
# `def` is outside this chunk, indentation is lost, and the fragment is
# truncated mid-branch at its last line -- confirm against the full source.
if self.refit:
# Work on clones so the user's estimators are not modified in place.
# NOTE(review): clone() is applied to the whole list here, unlike the
# per-element cloning used in the classifier fragment above -- presumably
# sklearn.clone handles lists; verify.
self.regr_ = clone(self.regressors)
self.meta_regr_ = clone(self.meta_regressor)
else:
self.regr_ = self.regressors
self.meta_regr_ = self.meta_regressor
if self.verbose > 0:
print("Fitting %d regressors..." % (len(self.regressors)))
for regr in self.regr_:
if self.verbose > 0:
# 1-based position for progress output; list.index() is O(n) per pass.
i = self.regr_.index(regr) + 1
print("Fitting regressor%d: %s (%d/%d)" %
(i, _name_estimators((regr,))[0][0], i, len(self.regr_)))
if self.verbose > 2:
# Forward a reduced verbosity level to estimators that support it.
if hasattr(regr, 'verbose'):
regr.set_params(verbose=self.verbose - 2)
if self.verbose > 1:
print(_name_estimators((regr,))[0][1])
if sample_weight is None:
regr.fit(X, y)
else:
regr.fit(X, y, sample_weight=sample_weight)
# Level-one predictions become the meta-regressor's training input.
meta_features = self.predict_meta_features(X)
# NOTE(review): branch body missing -- the fragment ends here.
if not self.use_features_in_secondary:
def named_classifiers(self):
    """
    Returns
    -------
    List of named estimator tuples, like [('svc', SVC(...))]
    """
    return _name_estimators(self.classifiers)
def __init__(self, estimator, min_features=1, max_features=1,
             print_progress=True, scoring='accuracy',
             cv=5, n_jobs=1,
             pre_dispatch='2*n_jobs',
             clone_estimator=True):
    """Configure the feature selector; no fitting happens here.

    Parameters
    ----------
    estimator : scikit-learn estimator to evaluate feature subsets with.
    min_features, max_features : int, bounds on the subset sizes searched.
    print_progress : bool, whether to report progress during the search.
    scoring : str, scorer name resolved via ``get_scorer``.
    cv : int, number of cross-validation folds.
    n_jobs : int, parallel jobs for cross-validation.
    pre_dispatch : str or int, job pre-dispatch spec forwarded to joblib.
    clone_estimator : bool, fit a clone instead of the original estimator.
    """
    self.estimator = estimator
    self.min_features = min_features
    self.max_features = max_features
    self.scoring = scoring
    # Resolve the scorer once up front from the scoring string.
    self.scorer = get_scorer(scoring)
    self.cv = cv
    self.n_jobs = n_jobs
    self.pre_dispatch = pre_dispatch
    self.print_progress = print_progress
    # Map of generated name -> estimator for the single wrapped estimator.
    self.named_est = dict(_name_estimators([self.estimator]))
    self.clone_estimator = clone_estimator
    # Either work on a clone (default) or mutate the caller's estimator.
    self.est_ = (clone(self.estimator) if self.clone_estimator
                 else self.estimator)
    self.fitted = False
    self.interrupted_ = False
    # don't mess with this unless testing
    self._TESTING_INTERRUPT_MODE = False
def named_regressors(self):
    """
    Returns
    -------
    List of named estimator tuples, like [('svc', SVC(...))]
    """
    return _name_estimators(self.regressors)
def named_regressors(self):
    """Name the level-one regressors.

    Returns
    -------
    List of named estimator tuples, like [('svc', SVC(...))]
    """
    regressors = self.regressors
    return _name_estimators(regressors)
def __init__(self, clfs, voting='hard',
             weights=None, verbose=0, refit=True):
    """Store the voting-ensemble configuration; no fitting happens here.

    Parameters
    ----------
    clfs : list of classifiers forming the ensemble.
    voting : str, 'hard' by default.
    weights : array-like or None, per-classifier vote weights.
    verbose : int, verbosity level used during fitting.
    refit : bool, whether fit() clones the classifiers before training.
    """
    self.clfs = clfs
    # Map of generated name -> classifier, for parameter access by name.
    self.named_clfs = dict(_name_estimators(clfs))
    self.voting = voting
    self.weights = weights
    self.refit = refit
    self.verbose = verbose
def named_classifiers(self):
    """
    Returns
    -------
    List of named estimator tuples, like [('svc', SVC(...))]
    """
    return _name_estimators(self.classifiers)
def named_estimators(self):
    """Name the single wrapped estimator.

    Returns
    -------
    List of named estimator tuples, like [('svc', SVC(...))]
    """
    wrapped = [self.estimator]
    return _name_estimators(wrapped)