# Scraped-page residue (not code): Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of an `if stack == ...:` chain — the opening `if` is not
# visible in this chunk and the block indentation has been stripped, so the
# `elif` lines below cannot parse as-is; restore context/indentation before use.
# Stack on predicted class labels; logistic regression is the meta-classifier.
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
meta_classifier=lr)
# Report 3-fold CV accuracy for each base learner and the stacked ensemble.
for clf, label in zip([clf1, clf2, clf3, sclf],
['KNN',
'Random Forest',
'Naive Bayes',
'StackingClassifier']):
scores = model_selection.cross_val_score(clf, X, y,
cv=3, scoring='accuracy')
print("Accuracy: %0.2f (+/- %0.2f) [%s]"
% (scores.mean(), scores.std(), label))
# Variant 2: stack on per-classifier predicted probabilities (use_probas=True)
# without averaging them across classifiers (average_probas=False).
elif stack == 2:
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
use_probas=True,
average_probas=False,
meta_classifier=lr)
for clf, label in zip([clf1, clf2, clf3, sclf],
['KNN',
'Random Forest',
'Naive Bayes',
'StackingClassifier']):
scores = model_selection.cross_val_score(clf, X, y,
cv=3, scoring='accuracy')
print("Accuracy: %0.2f (+/- %0.2f) [%s]"
% (scores.mean(), scores.std(), label))
# NOTE(review): truncated — the constructor call below is cut off mid-argument
# list; the rest of this branch is missing from this chunk.
elif stack == 3:
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from mlxtend.classifier import StackingClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np
# Base learners: 1-nearest-neighbour, a seeded random forest, and Gaussian
# naive Bayes; logistic regression serves as the stacking meta-classifier.
clf1 = KNeighborsClassifier(n_neighbors=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
lr = LogisticRegression()
print('3-fold cross validation:\n')
# Selects which stacking variant below is run.
stack = 2
# NOTE(review): block indentation under this `if`/`elif` chain has been
# stripped in this chunk — the branch bodies will not parse as-is.
if stack == 1:
# Variant 1: stack on predicted class labels.
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
meta_classifier=lr)
# Report 3-fold CV accuracy for each base learner and the stacked ensemble.
for clf, label in zip([clf1, clf2, clf3, sclf],
['KNN',
'Random Forest',
'Naive Bayes',
'StackingClassifier']):
scores = model_selection.cross_val_score(clf, X, y,
cv=3, scoring='accuracy')
print("Accuracy: %0.2f (+/- %0.2f) [%s]"
% (scores.mean(), scores.std(), label))
# NOTE(review): truncated — this branch is cut off mid-argument list; the rest
# is missing from this chunk.
elif stack == 2:
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
use_probas=True,
# --- data: two iris feature columns (indices 1 and 2) and the species labels ---
iris = datasets.load_iris()
x, y = iris.data[:, 1:3], iris.target

# --- models: three base learners stacked under a logistic-regression meta-learner ---
clf1 = KNeighborsClassifier(n_neighbors=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
lr = LogisticRegression()
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr)

# --- evaluation: mean / std of 3-fold CV accuracy for each model ---
labelled_models = zip([clf1, clf2, clf3, sclf],
                      ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier'])
for model, name in labelled_models:
    cv_scores = model_selection.cross_val_score(model, x, y, cv=3, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]"
          % (cv_scores.mean(), cv_scores.std(), name))
import matplotlib.pyplot as plt
def stacking(para, X, y):
    """Score a two-level stacked ensemble with 3-fold cross-validation.

    para: dict with keys "lvl_0" (list of level-0 classifiers), "top" (the
    meta-classifier sitting on top of level 0) and "lvl_1" (list of level-1
    classifiers whose meta-classifier is the whole level-0 stack).
    Returns the mean cross-validated score of the full stack on (X, y).
    """
    base_stack = StackingClassifier(
        classifiers=para["lvl_0"], meta_classifier=para["top"]
    )
    # The entire level-0 stack acts as the meta-classifier of level 1.
    full_stack = StackingClassifier(
        classifiers=para["lvl_1"], meta_classifier=base_stack
    )
    cv_scores = cross_val_score(full_stack, X, y, cv=3)
    return cv_scores.mean()
# NOTE(review): fragment of a model-factory function — the enclosing `def` and
# the initial `if model_type == ...:` branch header lie outside this chunk, and
# block indentation has been stripped (the `elif`/`return` below will not parse
# as-is).
from mlxtend.classifier import EnsembleVoteClassifier
from xgboost import XGBClassifier
# Soft-voting ensemble of LR, XGBoost, linear SVM (probability output enabled
# so soft voting works) and an MLP, weighted 1:2:2:1.
clf1 = LogisticRegression(random_state=0)
clf2 = XGBClassifier(random_state=0)
clf3 = SVC(random_state=0, kernel='linear', probability=True)
clf4 = MLPClassifier(random_state=0)
model = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3, clf4],
weights=[1, 2, 2, 1], voting='soft', verbose=2)
elif model_type == 'stack':
# Stacking ensemble: XGBoost, linear SVM and MLP as base learners, stacked on
# their per-classifier class probabilities under a logistic-regression
# meta-classifier.
from mlxtend.classifier import StackingClassifier
from xgboost import XGBClassifier
clf1 = XGBClassifier(random_state=0)
clf2 = SVC(random_state=0, kernel='linear', probability=True)
clf3 = MLPClassifier(random_state=0)
lr = LogisticRegression()
model = StackingClassifier(classifiers=[clf1, clf2, clf3],
use_probas=True,
average_probas=False,
meta_classifier=lr)
else:
# Unknown model_type: fail loudly rather than silently fall through.
raise ValueError('model type set error.')
return model
# NOTE(review): duplicate of the byte-identical `stacking` definition earlier in
# this file — at import time this redefinition shadows the earlier one. Body
# indentation appears stripped in this chunk; restore before use.
def stacking(para, X, y):
# Level-0 stack: the "lvl_0" classifiers feed the "top" meta-classifier.
stack_lvl_0 = StackingClassifier(
classifiers=para["lvl_0"], meta_classifier=para["top"]
)
# Level-1 stack: the "lvl_1" classifiers feed the whole level-0 stack.
stack_lvl_1 = StackingClassifier(
classifiers=para["lvl_1"], meta_classifier=stack_lvl_0
)
# Mean 3-fold cross-validated score of the full two-level stack.
scores = cross_val_score(stack_lvl_1, X, y, cv=3)
return scores.mean()
from sklearn.linear_model import RidgeClassifier
from sklearn.naive_bayes import GaussianNB
import warnings
warnings.filterwarnings("ignore")
# Load the combined dataset: each row is feature values with the class label in
# the final column. Path is relative to this script's location.
dataset = np.loadtxt('../Dataset/comb.csv', delimiter=",")
# split data into X and y (loadtxt already returns an ndarray, so use its
# shape directly instead of re-wrapping in np.array)
X = dataset[:, :dataset.shape[1] - 1]
y = dataset[:, dataset.shape[1] - 1]

# Base learners: LDA, Gaussian naive Bayes and a random forest, stacked under a
# ridge-classifier meta-learner.
clf1 = LinearDiscriminantAnalysis()
clf2 = RidgeClassifier()
clf4 = RandomForestClassifier()
clf3 = GaussianNB()
sclf = StackingClassifier(classifiers=[clf1, clf3, clf4],
                          meta_classifier=clf2)

print('10-fold cross validation:\n')
# BUG FIX: the second zipped model was clf2 (the Ridge meta-classifier) while
# its label read 'Gaussian Naive Bayes'; evaluate clf3 (GaussianNB) so the
# models line up with their labels and with the stack's base learners.
for clf, label in zip([clf1, clf3, clf4, sclf],
                      ['LDA',
                       'Gaussian Naive Bayes',
                       'Random Forest',
                       'Meta - Ridge Classifier']):
    scores = model_selection.cross_val_score(clf, X, y, cv=10, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))