# NOTE(review): leftover web-scrape banner (not part of the tutorial);
# commented out so the file parses as Python:
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import spikeinterface.extractors as se
import spikeinterface.sorters as sorters
import spikeinterface.comparison as sc
import spikeinterface.widgets as sw
##############################################################################
# Generate a short synthetic dataset (4 channels, 10 s) together with its
# ground-truth spike trains; the fixed seed makes the example reproducible.
recording, sorting_true = se.example_datasets.toy_example(num_channels=4, duration=10, seed=0)
# Spike sort the recording with MountainSort4 (the sorter must be installed
# and discoverable by spikeinterface for this call to succeed).
sorting_MS4 = sorters.run_mountainsort4(recording)
##############################################################################
# Compare the sorter output against the known ground truth.
# exhaustive_gt=True declares that *all* true units are known, which enables
# the stricter match/false-positive metrics in the comparison.
cmp_gt_MS4 = sc.compare_sorter_to_ground_truth(sorting_true, sorting_MS4, exhaustive_gt=True)
##############################################################################
# To have an overview of the match we can use the unordered agreement matrix
sw.plot_agreement_matrix(cmp_gt_MS4, ordered=False)
##############################################################################
# or ordered (rows/columns permuted so the best matches lie on the diagonal)
sw.plot_agreement_matrix(cmp_gt_MS4, ordered=True)
"""
import spikeinterface.extractors as se
import spikeinterface.toolkit as st
import spikeinterface.sorters as ss
##############################################################################
# First, let's create a toy example:
recording, sorting = se.example_datasets.toy_example(num_channels=4, duration=30, seed=0)
##############################################################################
# and let's spike sort using klusta
sorting_KL = ss.run_klusta(recording)
print('Units:', sorting_KL.get_unit_ids())
print('Number of units:', len(sorting_KL.get_unit_ids()))
##############################################################################
# There are several available functions that enables to only retrieve units with respect to some rules. For example,
# let's automatically curate the sorting output so that only the units with SNR > 10 and mean firing rate > 2.3 Hz are
# kept:
sorting_fr = st.curation.threshold_firing_rate(sorting_KL, threshold=2.3, threshold_sign='less')
print('Units after FR theshold:', sorting_fr.get_unit_ids())
print('Number of units after FR theshold:', len(sorting_fr.get_unit_ids()))
sorting_snr = st.curation.threshold_snr(sorting_fr, recording, threshold=10, threshold_sign='less')
# The different groups can also be sorted in
# parallel, and the output sorting extractor will have the same property used for sorting. Running in parallel
# (in separate threads) can speed up the computations.
#
# Let's first run the four channel groups sequentially:
# NOTE(review): `time` is never imported and `recording_tetrodes` is not
# defined anywhere in this file as shown — this fragment looks pasted from a
# later section of the original tutorial; the missing setup (import time,
# tetrode recording construction) needs to be restored for it to run.
t_start = time.time()
# grouping_property='group' makes the sorter run once per channel group;
# parallel=False runs the groups one after another.
sorting_tetrodes = ss.run_sorter('klusta', recording_tetrodes, output_folder='tmp_tetrodes',
grouping_property='group', parallel=False, verbose=False)
print('Elapsed time: ', time.time() - t_start)
##############################################################################
# then in parallel:
t_start = time.time()
# Same call, but parallel=True sorts the groups concurrently (separate
# threads, per the note above).
sorting_tetrodes_p = ss.run_sorter('klusta', recording_tetrodes, output_folder='tmp_tetrodes_par',
grouping_property='group', parallel=True, verbose=False)
print('Elapsed time parallel: ', time.time() - t_start)
##############################################################################
# The units of the sorted output will have the same property used for spike sorting:
print(sorting_tetrodes.get_shared_unit_property_names())
##############################################################################
# Note that channels can be split by any property. Let's for example assume that half of the tetrodes are in hippocampus
# CA1 region, and the other half is in CA3. first we have to load this property (this can be done also from the '.prb'
# file):
# Tag the first half of the channels as CA1 (the CA3 half is presumably set
# in a later, unseen part of the tutorial — confirm).
for ch in recording_tetrodes.get_channel_ids()[:int(recording_tetrodes.get_num_channels() / 2)]:
    recording_tetrodes.set_channel_property(ch, property_name='region', value='CA1')