How to use the pyriemann.tangentspace.TangentSpace function in pyriemann

To help you get started, we’ve selected a few pyriemann examples, based on popular ways it is used in public projects.
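Before diving into the project excerpts below, here is a minimal, self-contained sketch of the typical usage pattern: estimate covariance matrices from multichannel trials, then map them to the tangent space. The random data and shapes are illustrative only, not taken from any of the projects below.

import numpy as np
from pyriemann.estimation import Covariances
from pyriemann.tangentspace import TangentSpace

rng = np.random.RandomState(42)
X = rng.randn(20, 8, 256)                 # 20 trials, 8 channels, 256 time samples

covs = Covariances(estimator='oas').fit_transform(X)   # (20, 8, 8) SPD matrices
ts = TangentSpace(metric='riemann')
vectors = ts.fit_transform(covs)          # (20, 36) tangent vectors: 8 * 9 / 2 = 36

Fitting computes the reference point (the mean covariance) at which the matrices are projected, which is why TangentSpace is fitted before transforming.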


github alexandrebarachant / pyRiemann / tests / test_tangentspace.py
def test_TangentSpace_inversetransform_without_fit():
    """Test inverse transform of Tangent Space without fit."""
    covset = generate_cov(10, 3)
    ts = TangentSpace(metric='identity')
    tsv = ts.fit_transform(covset)
    ts = TangentSpace(metric='riemann')
    cov = ts.inverse_transform(tsv)
    assert_array_almost_equal(covset, cov)
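The generate_cov helper above comes from pyRiemann's test suite. A simpler self-contained variant of the same round-trip, building random SPD matrices directly with numpy (sizes chosen arbitrarily) and reusing a single fitted TangentSpace instance:

import numpy as np
from numpy.testing import assert_array_almost_equal
from pyriemann.tangentspace import TangentSpace

rng = np.random.RandomState(0)
A = rng.randn(10, 3, 3)
covset = A @ A.transpose(0, 2, 1) + 3 * np.eye(3)   # 10 random 3x3 SPD matrices

ts = TangentSpace(metric='riemann')
tsv = ts.fit_transform(covset)             # project onto the tangent space
back = ts.inverse_transform(tsv)           # map the vectors back to SPD matrices
assert_array_almost_equal(covset, back)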
github alexandrebarachant / pyRiemann / pyriemann / classification.py
def fit(self, X, y):
        """Fit TSclassifier.

        Parameters
        ----------
        X : ndarray, shape (n_trials, n_channels, n_channels)
            ndarray of SPD matrices.
        y : ndarray, shape (n_trials, 1)
            Labels corresponding to each trial.

        Returns
        -------
        self : TSclassifier instance
            The TSclassifier instance.
        """
        ts = TangentSpace(metric=self.metric, tsupdate=self.tsupdate)
        self._pipe = make_pipeline(ts, self.clf)
        self._pipe.fit(X, y)
        return self
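The fit method above belongs to pyriemann.classification.TSclassifier, which chains a TangentSpace mapping with the classifier held in self.clf (a LogisticRegression by default). A minimal sketch of calling it on synthetic data; the random SPD matrices and labels are illustrative only:

import numpy as np
from pyriemann.classification import TSclassifier

rng = np.random.RandomState(1)
A = rng.randn(40, 4, 4)
X = A @ A.transpose(0, 2, 1) + 4 * np.eye(4)   # 40 random 4x4 SPD matrices
y = np.array([0, 1] * 20)                      # two arbitrary classes

clf = TSclassifier(metric='riemann')
clf.fit(X, y)
print(clf.predict(X[:5]))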
github alexandrebarachant / decoding-brain-challenge-2016 / cross_validation_paper.py
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.cross_validation import KFold
from sklearn.metrics import roc_auc_score

from utils import (DownSampler, EpochsVectorizer, CospBoostingClassifier,
                   epoch_data)

dataframe1 = pd.read_csv('ecog_train_with_labels.csv')

array_clfs = OrderedDict()

# ERPs models
array_clfs['XdawnCov'] = make_pipeline(XdawnCovariances(6, estimator='oas'),
                                       TangentSpace('riemann'),
                                       LogisticRegression('l2'))

array_clfs['Xdawn'] = make_pipeline(Xdawn(12, estimator='oas'),
                                    DownSampler(5),
                                    EpochsVectorizer(),
                                    LogisticRegression('l2'))

# Induced activity models

baseclf = make_pipeline(ElectrodeSelection(10, metric=dict(mean='logeuclid',
                                                           distance='riemann')),
                        TangentSpace('riemann'),
                        LogisticRegression('l1'))

array_clfs['Cosp'] = make_pipeline(CospCovariances(fs=1000, window=32,
                                                   overlap=0.95, fmax=300,
                                                   fmin=1),
                                   CospBoostingClassifier(baseclf))
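The excerpt above imports KFold from sklearn.cross_validation, which has since been removed from scikit-learn (it moved to sklearn.model_selection). A hedged sketch of the same evaluation style with the modern module and synthetic data (shapes illustrative only):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_val_score
from sklearn.pipeline import make_pipeline
from pyriemann.estimation import Covariances
from pyriemann.tangentspace import TangentSpace

rng = np.random.RandomState(2)
X = rng.randn(60, 8, 128)                  # 60 trials, 8 channels, 128 samples
y = np.array([0, 1] * 30)

pipe = make_pipeline(Covariances(estimator='oas'),
                     TangentSpace(metric='riemann'),
                     LogisticRegression())
scores = cross_val_score(pipe, X, y, cv=KFold(n_splits=5), scoring='roc_auc')
print(scores.mean())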
github alexandrebarachant / decoding-brain-challenge-2016 / generate_models.py
# Induced activity models

baseclf = make_pipeline(ElectrodeSelection(10, metric=dict(mean='logeuclid',
                                                           distance='riemann')),
                        TangentSpace('riemann'),
                        LogisticRegression('l1'))

array_clfs['Cosp'] = make_pipeline(CospCovariances(fs=1000, window=32,
                                                   overlap=0.95, fmax=300,
                                                   fmin=1),
                                   CospBoostingClassifier(baseclf))

array_clfs['HankelCov'] = make_pipeline(DownSampler(2),
                                        HankelCovariances(delays=[2, 4, 8, 12, 16], estimator='oas'),
                                        TangentSpace('logeuclid'),
                                        LogisticRegression('l1'))

array_clfs['CSSP'] = make_pipeline(HankelCovariances(delays=[2, 4, 8, 12, 16],
                                                     estimator='oas'),
                                   CSP(30),
                                   LogisticRegression('l1'))

patients = dataframe1.PatientID.values

index = list(array_clfs.keys()) + ['Ensemble']
columns = ['p1', 'p2', 'p3', 'p4']
res_acc = pd.DataFrame(index=index, columns=columns)
res_auc = pd.DataFrame(index=index, columns=columns)


for p in np.unique(patients):
github alexandrebarachant / pyRiemann / pyriemann / tangentspace.py
        Parameters
        ----------
        X : ndarray, shape (n_trials, n_channels, n_channels)
            ndarray of SPD matrices.
        y : ndarray | None (default None)
            Not used, here for compatibility with sklearn API.
        sample_weight : ndarray | None (default None)
            weight of each sample.

        Returns
        -------
        covs : ndarray, shape (n_trials, n_channels, n_channels)
            Covariance matrices after filtering.
        """
        self._ts = TangentSpace(metric=self.metric, tsupdate=self.tsupdate)
        ts = self._fit_lda(X, y, sample_weight=sample_weight)
        return self._retro_project(ts)
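Judging by the metric and tsupdate attributes and the _fit_lda / _retro_project calls, the method above belongs to the FGDA class in pyriemann.tangentspace, which filters SPD matrices through an LDA fitted in the tangent space. A minimal sketch of using it on synthetic data (matrices and labels illustrative only):

import numpy as np
from pyriemann.tangentspace import FGDA

rng = np.random.RandomState(3)
A = rng.randn(30, 4, 4)
covs = A @ A.transpose(0, 2, 1) + 4 * np.eye(4)   # 30 random 4x4 SPD matrices
y = np.array([0, 1] * 15)

fgda = FGDA(metric='riemann', tsupdate=False)
filtered = fgda.fit_transform(covs, y)     # filtered SPD matrices, same shape as covs
print(filtered.shape)                      # (30, 4, 4)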
github alexandrebarachant / decoding-brain-challenge-2016 / generate_models.py
# ERPs models
array_clfs['XdawnCov'] = make_pipeline(XdawnCovariances(6, estimator='oas'),
                                       TangentSpace('riemann'),
                                       LogisticRegression('l2'))

array_clfs['Xdawn'] = make_pipeline(Xdawn(12, estimator='oas'),
                                    DownSampler(5),
                                    EpochsVectorizer(),
                                    LogisticRegression('l2'))

# Induced activity models

baseclf = make_pipeline(ElectrodeSelection(10, metric=dict(mean='logeuclid',
                                                           distance='riemann')),
                        TangentSpace('riemann'),
                        LogisticRegression('l1'))

array_clfs['Cosp'] = make_pipeline(CospCovariances(fs=1000, window=32,
                                                   overlap=0.95, fmax=300,
                                                   fmin=1),
                                   CospBoostingClassifier(baseclf))

array_clfs['HankelCov'] = make_pipeline(DownSampler(2),
                                        HankelCovariances(delays=[2, 4, 8, 12, 16], estimator='oas'),
                                        TangentSpace('logeuclid'),
                                        LogisticRegression('l1'))

array_clfs['CSSP'] = make_pipeline(HankelCovariances(delays=[2, 4, 8, 12, 16],
                                                     estimator='oas'),
                                   CSP(30),
                                   LogisticRegression('l1'))
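The pipelines above pass different metrics to TangentSpace ('riemann' for the Xdawn and cospectral models, 'logeuclid' for the Hankel covariances). A small sketch comparing metrics on the same covariance set; in every case the output dimension is n_channels * (n_channels + 1) / 2, and depending on the pyriemann version the metric controls the reference mean or both the mean and the mapping:

import numpy as np
from pyriemann.tangentspace import TangentSpace

rng = np.random.RandomState(4)
A = rng.randn(20, 6, 6)
covs = A @ A.transpose(0, 2, 1) + 6 * np.eye(6)   # 20 random 6x6 SPD matrices

for metric in ('riemann', 'logeuclid', 'euclid'):
    vecs = TangentSpace(metric=metric).fit_transform(covs)
    print(metric, vecs.shape)              # (20, 21) since 6 * 7 / 2 = 21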
github NeuroTechX / moabb / examples / plot_cross_session_motor_imagery.py
#
# Pipelines must be a dict of sklearn pipeline transformers.
#
# The CSP implementation from MNE is used. We selected 8 CSP components, as
# usually done in the literature.
#
# The Riemannian geometry pipeline consists of covariance estimation, tangent
# space mapping and finally a logistic regression for the classification.

pipelines = {}

pipelines['CSP + LDA'] = make_pipeline(CSP(n_components=8),
                                       LDA())

pipelines['RG + LR'] = make_pipeline(Covariances(),
                                     TangentSpace(),
                                     LogisticRegression(solver='lbfgs'))

##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (LeftRightImagery) and the dataset (BNCI2014001).
# The evaluation will return a dataframe containing a single AUC score for
# each subject / session of the dataset, and for each pipeline.
#
# Results are saved into the database, so that if you add a new pipeline, the
# evaluation will not run again unless a parameter has changed. Results can be
# overwritten if necessary.

paradigm = LeftRightImagery()
# Because this is being auto-generated we only use 2 subjects
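A hedged sketch of how this moabb example typically continues, assuming the standard moabb API (CrossSessionEvaluation and the BNCI2014001 dataset named in the comments above; the dataset is downloaded on first use):

from moabb.datasets import BNCI2014001
from moabb.evaluations import CrossSessionEvaluation

dataset = BNCI2014001()
dataset.subject_list = dataset.subject_list[:2]   # only 2 subjects, as noted above

evaluation = CrossSessionEvaluation(paradigm=paradigm,
                                    datasets=[dataset],
                                    overwrite=False)
results = evaluation.process(pipelines)
print(results.head())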
github alexandrebarachant / decoding-brain-challenge-2016 / cross_validation_challenge.py
# Induced activity models

baseclf = make_pipeline(ElectrodeSelection(10, metric=dict(mean='logeuclid',
                                                           distance='riemann')),
                        TangentSpace('riemann'),
                        LogisticRegression('l1'))

array_clfs['Cosp'] = make_pipeline(CospCovariances(fs=1000, window=32,
                                                   overlap=0.95, fmax=300,
                                                   fmin=1),
                                   CospBoostingClassifier(baseclf))

array_clfs['HankelCov'] = make_pipeline(DownSampler(2),
                                        HankelCovariances(delays=[2, 4, 8, 12, 16], estimator='oas'),
                                        TangentSpace('logeuclid'),
                                        LogisticRegression('l1'))

array_clfs['CSSP'] = make_pipeline(HankelCovariances(delays=[2, 4, 8, 12, 16],
                                                     estimator='oas'),
                                   CSP(30),
                                   LogisticRegression('l1'))

patients = dataframe1.PatientID.values

index = list(array_clfs.keys()) + ['Ensemble']
columns = ['p1', 'p2', 'p3', 'p4']
res_acc = pd.DataFrame(index=index, columns=columns)
res_auc = pd.DataFrame(index=index, columns=columns)


for p in np.unique(patients):
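The per-patient loop is cut off above. Purely as an illustration of the pattern (this is not the repository's code), a self-contained sketch that cross-validates one tangent-space pipeline separately for each of four synthetic "patients":

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from pyriemann.estimation import Covariances
from pyriemann.tangentspace import TangentSpace

rng = np.random.RandomState(5)
pipe = make_pipeline(Covariances(estimator='oas'),
                     TangentSpace(metric='riemann'),
                     LogisticRegression())

res = pd.Series(dtype=float)
for p in ['p1', 'p2', 'p3', 'p4']:         # synthetic stand-ins for the patients
    X = rng.randn(40, 6, 128)              # 40 trials, 6 channels, 128 samples
    y = np.array([0, 1] * 20)
    res[p] = cross_val_score(pipe, X, y, cv=5, scoring='roc_auc').mean()
print(res)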