def test_TSclassifier():
    """Test TS Classifier"""
    covset = generate_cov(40, 3)
    labels = np.array([0, 1]).repeat(20)
    # an invalid clf argument should raise a TypeError
    assert_raises(TypeError, TSclassifier, clf='666')
    clf = TSclassifier()
    clf.fit(covset, labels)
    clf.predict(covset)
    clf.predict_proba(covset)
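# The test above relies on names supplied by the surrounding test module. A
# minimal, self-contained sketch of those pieces, assuming random SPD matrices
# are an acceptable stand-in for real covariance estimates (generate_cov below
# is illustrative, not pyRiemann's own fixture):
import numpy as np
from numpy.testing import assert_raises
from pyriemann.classification import TSclassifier


def generate_cov(n_trials, n_channels):
    """Return a stack of random symmetric positive-definite matrices."""
    rs = np.random.RandomState(42)
    A = rs.randn(n_trials, n_channels, n_channels)
    return A @ A.transpose(0, 2, 1) + 1e-3 * np.eye(n_channels)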
# Multi-class motor imagery benchmark: compare MDM, tangent space and CSP+LDA
# pipelines across several MOABB datasets.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import OrderedDict

from sklearn.pipeline import make_pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from pyriemann.estimation import Covariances
from pyriemann.classification import MDM, TSclassifier
from pyriemann.spatialfilters import CSP
from moabb.datasets.bnci import BNCI2014001
from moabb.datasets.alex_mi import AlexMI
from moabb.datasets.physionet_mi import PhysionetMI
# MotorImageryMultiClasses comes from the early MOABB contexts API; this
# import path is an assumption about that version of the package.
from moabb.contexts.motor_imagery import MotorImageryMultiClasses

datasets = [AlexMI(with_rest=True),
            BNCI2014001(),
            PhysionetMI(with_rest=True, feets=False)]

pipelines = OrderedDict()
pipelines['MDM'] = make_pipeline(Covariances('oas'), MDM())
pipelines['TS'] = make_pipeline(Covariances('oas'), TSclassifier())
pipelines['CSP+LDA'] = make_pipeline(Covariances('oas'), CSP(8), LDA())

context = MotorImageryMultiClasses(datasets=datasets, pipelines=pipelines)
results = context.evaluate(verbose=True)

# save per-pipeline results, then aggregate and plot them
for p in results.keys():
    results[p].to_csv('../../results/MotorImagery/MultiClass/%s.csv' % p)
results = pd.concat(results.values())
print(results.groupby('Pipeline').mean())

res = results.pivot(values='Score', columns='Pipeline')
sns.lmplot(data=res, x='CSP+LDA', y='TS', fit_reg=False)
plt.xlim(0.25, 1)
plt.ylim(0.25, 1)
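# Optional reading aid for the scatter plot above (matplotlib only): draw the
# identity line so that points above it mark results where the tangent-space
# pipeline beats CSP+LDA.
plt.plot([0.25, 1], [0.25, 1], ls='--', c='gray')
plt.show()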
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from mne.decoding import CSP
from pyriemann.classification import MDM, TSclassifier

###############################################################################
# Classification with Minimum distance to mean
mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
# Use scikit-learn Pipeline with cross_val_score function
scores = cross_val_score(mdm, cov_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("MDM Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
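# The cross-validation above assumes `cov_data_train`, `labels` and `cv` were
# built in the (elided) preprocessing step of the original example. A minimal,
# self-contained sketch of how such inputs could look, using synthetic epochs
# in place of the real MEG/EEG recordings:
import numpy as np
from sklearn.model_selection import KFold
from pyriemann.estimation import Covariances

rng = np.random.RandomState(42)
X = rng.randn(40, 8, 256)             # (n_trials, n_channels, n_samples)
labels = np.array([0, 1]).repeat(20)  # two balanced classes
cov_data_train = Covariances(estimator='oas').fit_transform(X)
cv = KFold(n_splits=10, shuffle=True, random_state=42)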
###############################################################################
# Classification with Tangent Space Logistic Regression
clf = TSclassifier()
# Use scikit-learn Pipeline with cross_val_score function
scores = cross_val_score(clf, cov_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Tangent space Classification accuracy: %f / Chance level: %f" %
(np.mean(scores), class_balance))
###############################################################################
# Classification with CSP + logistic regression
# Assemble a classifier
lr = LogisticRegression()
csp = CSP(n_components=4, reg='ledoit_wolf', log=True)
clf = Pipeline([('CSP', csp), ('LogisticRegression', lr)])
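# Usage sketch for the CSP + logistic regression pipeline, reusing the
# synthetic `X`, `labels` and `cv` from the sketch above (MNE's CSP works on
# the epoched time series, not on the covariance matrices):
scores = cross_val_score(clf, X, labels, cv=cv, n_jobs=1)
print("CSP + LR Classification accuracy: %f / Chance level: %f" %
      (np.mean(scores), class_balance))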
import logging
from collections import OrderedDict

import coloredlogs
from sklearn.pipeline import make_pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from pyriemann.estimation import Covariances
from pyriemann.classification import TSclassifier
from pyriemann.spatialfilters import CSP
from moabb.datasets import utils
from moabb.evaluations import WithinSessionEvaluation
# ImageryNClass and analyze follow the early MOABB API; these import paths
# are assumptions about that version of the package.
from moabb.paradigms import ImageryNClass
from moabb.analysis import analyze

coloredlogs.install(level=logging.DEBUG)
datasets = utils.dataset_search('imagery', events=['supination', 'hand_close'],
                                has_all_events=False, min_subjects=2,
                                multi_session=False)
# keep at most ten subjects per dataset to shorten the benchmark
for d in datasets:
    d.subject_list = d.subject_list[:10]
paradigm = ImageryNClass(2)
context = WithinSessionEvaluation(paradigm=paradigm,
                                  datasets=datasets,
                                  random_state=42)
pipelines = OrderedDict()
pipelines['av+TS'] = make_pipeline(Covariances(estimator='oas'), TSclassifier())
pipelines['av+CSP+LDA'] = make_pipeline(Covariances(estimator='oas'), CSP(8), LDA())
results = context.process(pipelines, overwrite=True)
analyze(results, './')