import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_true, assert_raises

from pyriemann.spatialfilters import CSP

# generate_cov() is a helper defined elsewhere in this test module; it returns
# an array of random SPD covariance matrices of shape
# (n_trials, n_channels, n_channels).


def test_CSP():
    """Test CSP"""
    n_trials = 90
    X = generate_cov(n_trials, 3)
    labels = np.array([0, 1, 2]).repeat(n_trials // 3)

    # Test init
    csp = CSP()
    assert_true(csp.nfilter == 4)
    assert_true(csp.metric == 'euclid')
    assert_true(csp.log)
    csp = CSP(3, 'riemann', False)
    assert_true(csp.nfilter == 3)
    assert_true(csp.metric == 'riemann')
    assert_true(not csp.log)
    assert_raises(TypeError, CSP, 'foo')
    assert_raises(ValueError, CSP, metric='foo')
    assert_raises(TypeError, CSP, log='foo')

    # Test fit
    csp = CSP()
    csp.fit(X, labels % 2)  # two classes
    csp.fit(X, labels)  # three classes
    assert_raises(ValueError, csp.fit, X, labels * 0.)  # single class
    assert_raises(ValueError, csp.fit, X, labels[:1])  # unequal # of samples
    assert_raises(TypeError, csp.fit, X, 'foo')  # y must be an array
    assert_raises(TypeError, csp.fit, 'foo', labels)  # X must be an array
    assert_raises(ValueError, csp.fit, X[:, 0], labels)
    assert_raises(ValueError, csp.fit, X, X)
    assert_array_equal(csp.filters_.shape, [X.shape[1], X.shape[1]])
    assert_array_equal(csp.patterns_.shape, [X.shape[1], X.shape[1]])

    # Test transform
    Xt = csp.transform(X)
    assert_array_equal(Xt.shape, [len(X), X.shape[1]])
    assert_raises(TypeError, csp.transform, 'foo')
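###############################################################################
# Distance based permutation test
###############################################################################
# NOTE: the fragment that follows reuses `p_test`, `covmats`, `labels`,
# `n_perms` and `t_init` from an earlier part of the original example that is
# not shown here. The setup below is a minimal, assumed reconstruction
# (values and data are illustrative only) so the fragment can run end to end.
from time import time

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import CSP
from pyriemann.stats import PermutationDistance, PermutationModel

n_perms = 500  # assumed number of permutations
rng = np.random.RandomState(42)
# random multichannel signals -> one covariance matrix (SPD) per trial
covmats = Covariances().transform(rng.randn(100, 8, 256))
labels = np.array([0, 1]).repeat(50)

t_init = time()
# F-test on a distance-based statistic computed with the Riemannian metric
p_test = PermutationDistance(n_perms, metric='riemann', mode='ftest')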
p, F = p_test.test(covmats, labels)
duration = time() - t_init
fig, axes = plt.subplots(1, 1, figsize=[6, 3], sharey=True)
p_test.plot(nbins=10, axes=axes)
plt.title('F-test distance - %.2f sec.' % duration)
print('p-value: %.3f' % p)
sns.despine()
plt.tight_layout()
plt.show()
###############################################################################
# Classification based permutation test
###############################################################################
clf = make_pipeline(CSP(4), LogisticRegression())
t_init = time()
p_test = PermutationModel(n_perms, model=clf, cv=3, scoring='roc_auc')
p, F = p_test.test(covmats, labels)
duration = time() - t_init
fig, axes = plt.subplots(1, 1, figsize=[6, 3], sharey=True)
p_test.plot(nbins=10, axes=axes)
plt.title('Classification - %.2f sec.' % duration)
print('p-value: %.3f' % p)
sns.despine()
plt.tight_layout()
plt.show()
                                    # (closing lines of a pipeline whose
                                    # opening is not shown in this snippet)
                                    TangentSpace('riemann'),
                                    LogisticRegression(penalty='l1',
                                                       solver='liblinear'))

array_clfs['Cosp'] = make_pipeline(CospCovariances(fs=1000, window=32,
                                                   overlap=0.95, fmax=300,
                                                   fmin=1),
                                   CospBoostingClassifier(baseclf))

array_clfs['HankelCov'] = make_pipeline(
    DownSampler(2),
    HankelCovariances(delays=[2, 4, 8, 12, 16], estimator='oas'),
    TangentSpace('logeuclid'),
    LogisticRegression(penalty='l1', solver='liblinear'))

array_clfs['CSSP'] = make_pipeline(
    HankelCovariances(delays=[2, 4, 8, 12, 16], estimator='oas'),
    CSP(30),
    LogisticRegression(penalty='l1', solver='liblinear'))
patients = dataframe1.PatientID.values

index = list(array_clfs.keys()) + ['Ensemble']
columns = ['p1', 'p2', 'p3', 'p4']
res_acc = pd.DataFrame(index=index, columns=columns)
res_auc = pd.DataFrame(index=index, columns=columns)

for p in np.unique(patients):
    print('Patient %s' % p)
    clfs = deepcopy(array_clfs)
    ix = patients == p
    eeg_data = np.float64(dataframe1.loc[ix].values[:, 1:-2].T)
    events = np.int32(dataframe1.Stimulus_Type.loc[ix].values)
import numpy as np

from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC

from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import CSP

from moabb.pipelines.utils import FilterBank

parameters = {'C': np.logspace(-2, 2, 10)}
clf = GridSearchCV(SVC(kernel='linear'), parameters)

fb = FilterBank(make_pipeline(Covariances(estimator='oas'), CSP(nfilter=4)))
pipe = make_pipeline(fb, SelectKBest(score_func=mutual_info_classif, k=10),
                     clf)
# this is what will be loaded
PIPELINE = {'name': 'FBCSP + optSVM',
            'paradigms': ['FilterBankMotorImagery'],
            'pipeline': pipe}
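# Illustrative evaluation (assumption, not part of the original pipeline file).
# MOABB discovers this file through the PIPELINE dict above and evaluates the
# pipeline on every dataset of the declared paradigm. The block below is a
# minimal, assumed sketch of running the same pipeline directly; the dataset
# and evaluation class are illustrative choices and the exact API may vary
# between MOABB versions. The __main__ guard keeps it from executing when
# MOABB loads the file.
if __name__ == '__main__':
    from moabb.datasets import BNCI2014001
    from moabb.evaluations import WithinSessionEvaluation
    from moabb.paradigms import FilterBankMotorImagery

    paradigm = FilterBankMotorImagery()
    evaluation = WithinSessionEvaluation(paradigm=paradigm,
                                         datasets=[BNCI2014001()])
    results = evaluation.process({'FBCSP + optSVM': pipe})
    print(results)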