# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the enclosing function's signature and the first
# branch of this if/elif chain are above this view; indentation was lost in
# extraction. Code is kept byte-identical; comments only.
# Tail of the first branch: turn raw kNN indices/distances into sparse
# distance and connectivity graphs (UMAP-style fuzzy simplicial set).
neighbors.distances, neighbors.connectivities = compute_connectivities_umap(
neighbors.knn_indices, knn_distances, X.shape[0], n_neighbors=n_neighbors
)
elif method == "hnsw":
# Approximate kNN via the hnswlib-backed FastNeighbors helper,
# fit on the chosen representation (X itself or an obsm embedding).
X = adata.X if use_rep == "X" else adata.obsm[use_rep]
neighbors = FastNeighbors(n_neighbors=n_neighbors, num_threads=num_threads)
neighbors.fit(
X if n_pcs is None else X[:, :n_pcs],
metric=metric,
random_state=random_state,
**metric_kwds,
)
else:
# Default path: delegate to the scanpy-style Neighbors class; scanpy
# logging is muted around the call and numba warnings suppressed.
logg.switch_verbosity("off", module="scanpy")
with warnings.catch_warnings():  # ignore numba warning (umap/issues/252)
warnings.simplefilter("ignore")
neighbors = Neighbors(adata)
neighbors.compute_neighbors(
n_neighbors=n_neighbors,
knn=knn,
n_pcs=n_pcs,
method=method,
use_rep=None if use_rep == "X_pca" else use_rep,
random_state=random_state,
metric=metric,
metric_kwds=metric_kwds,
write_knn_indices=True,
)
logg.switch_verbosity("on", module="scanpy")
# NOTE(review): fragment of a velocity-clustering routine — the construction
# of `tmp_filter` starts above this view.
# Keep only genes whose fit likelihood exceeds the threshold.
tmp_filter &= (adata.var['fit_likelihood'] > min_likelihood)
from .. import AnnData
# Build a throwaway AnnData holding only the filtered genes' velocities,
# carrying over per-cell and per-gene metadata.
vdata = AnnData(adata.layers[vkey][:, tmp_filter])
vdata.obs = adata.obs.copy()
vdata.var = adata.var[tmp_filter].copy()
if 'highly_variable' in vdata.var.keys():
# force a clean bool dtype (may arrive as object after subsetting/IO)
vdata.var['highly_variable'] = np.array(vdata.var['highly_variable'], dtype=bool)
import scanpy as sc
# Standard scanpy pipeline on the velocity matrix: PCA -> kNN -> louvain,
# with scanpy's own logging muted for the duration.
logg.switch_verbosity('off', module='scanpy')
sc.pp.pca(vdata, n_comps=20, svd_solver='arpack')
sc.pp.neighbors(vdata, n_pcs=20)
sc.tl.louvain(vdata, resolution=.7 if resolution is None else resolution)
logg.switch_verbosity('on', module='scanpy')
# Order louvain clusters by their mean value of `sort_by` (computing
# velocity_pseudotime on demand) so cluster labels increase along the
# trajectory.
if sort_by == 'velocity_pseudotime' and sort_by not in adata.obs.keys():
    velocity_pseudotime(adata, vkey=vkey)
if sort_by in vdata.obs.keys():
    vc = vdata.obs['louvain']
    vc_cats = vc.cat.categories
    # mean sort key per cluster, in category order
    mean_times = [np.mean(vdata.obs[sort_by][vc == cat]) for cat in vc_cats]
    # `reorder_categories(..., inplace=True)` was deprecated in pandas 1.3
    # and removed in pandas 2.0; assigning the reordered Series back is
    # equivalent and works on all pandas versions.
    vdata.obs['louvain'] = vc.cat.reorder_categories(
        vc_cats[np.argsort(mean_times)]
    )
# NOTE(review): fragment cut mid-loop — the use of `new_cat`/`cats_nums`
# continues past this view.
# Optionally relabel clusters by majority vote against an existing
# categorical annotation in `adata.obs[match_with]`.
if isinstance(match_with, str) and match_with in adata.obs.keys():
from .utils import most_common_in_list
vc = vdata.obs['louvain']
# per reference-category counter, presumably for de-duplicating labels
cats_nums = {cat: 0 for cat in adata.obs[match_with].cat.categories}
for i, cat in enumerate(vc.cat.categories):
cells_in_cat = np.where(vc == cat)[0]
# most frequent reference label among this cluster's cells
new_cat = most_common_in_list(adata.obs[match_with][cells_in_cat])
# NOTE(review): the `def score_robustness(` line sits above this view; these
# are its parameters and body (a second copy appears further below).
data, adata_subset=None, fraction=0.5, vkey="velocity", copy=False
):
adata = data.copy() if copy else data
if adata_subset is None:
# No subset supplied: subsample the cells and rerun the
# neighbors -> moments -> velocity pipeline on the subsample.
from ..preprocessing.moments import moments
from ..preprocessing.neighbors import neighbors
from .velocity import velocity
logg.switch_verbosity("off")
adata_subset = adata.copy()
subset = random_subsample(adata_subset, fraction=fraction, return_subset=True)
neighbors(adata_subset)
moments(adata_subset)
velocity(adata_subset, vkey=vkey)
logg.switch_verbosity("on")
else:
# Caller supplied the subset; recover its boolean mask by name.
subset = adata.obs_names.isin(adata_subset.obs_names)
V = adata[subset].layers[vkey]
V_subset = adata_subset.layers[vkey]
# All-NaN float array shaped like `subset`: nan * {0, 1} is always nan,
# the comparison only fixes shape/dtype (np.full would be clearer).
score = np.nan * (subset == False)
# Cosine-style agreement between full and subsampled velocity per cell.
score[subset] = prod_sum_var(V, V_subset) / (norm(V) * norm(V_subset))
adata.obs[f"{vkey}_score_robustness"] = score
return adata_subset if copy else None
# NOTE(review): fragment — duplicate of the scanpy-Neighbors fallback seen
# earlier in this chunk; the enclosing function is not visible here.
# Mute scanpy logging and numba warnings while computing the kNN graph.
logg.switch_verbosity("off", module="scanpy")
with warnings.catch_warnings():  # ignore numba warning (umap/issues/252)
warnings.simplefilter("ignore")
neighbors = Neighbors(adata)
neighbors.compute_neighbors(
n_neighbors=n_neighbors,
knn=knn,
n_pcs=n_pcs,
method=method,
use_rep=None if use_rep == "X_pca" else use_rep,
random_state=random_state,
metric=metric,
metric_kwds=metric_kwds,
write_knn_indices=True,
)
logg.switch_verbosity("on", module="scanpy")
# Store the computed graphs in the modern AnnData slots (`.obsp`), falling
# back to the legacy `.uns["neighbors"]` layout for anndata versions that
# predate `.obsp`.
adata.uns["neighbors"] = {}
try:
    adata.obsp["distances"] = neighbors.distances
    adata.obsp["connectivities"] = neighbors.connectivities
    adata.uns["neighbors"]["connectivities_key"] = "connectivities"
    adata.uns["neighbors"]["distances_key"] = "distances"
except Exception:
    # was a bare `except:`, which would also swallow KeyboardInterrupt and
    # SystemExit; `Exception` still covers the legacy-AnnData failure here.
    adata.uns["neighbors"]["distances"] = neighbors.distances
    adata.uns["neighbors"]["connectivities"] = neighbors.connectivities
if hasattr(neighbors, "knn_indices"):
    # raw kNN indices are only present for some backends/settings
    adata.uns["neighbors"]["indices"] = neighbors.knn_indices
# Record the parameters used (dict literal continues past this view).
adata.uns["neighbors"]["params"] = {
"n_neighbors": n_neighbors,
"method": method,
# NOTE(review): duplicate fragment of the velocity-clustering routine; the
# construction of `dispersions`/`tmp_filter` starts above this view.
# Keep genes above the 20th percentile of dispersion and, when available,
# above the fit-likelihood threshold.
min_dispersion = np.percentile(dispersions, 20)
tmp_filter &= (dispersions > min_dispersion)
if 'fit_likelihood' in adata.var.keys() and min_likelihood is not None:
tmp_filter &= (adata.var['fit_likelihood'] > min_likelihood)
from .. import AnnData
# Throwaway AnnData of the filtered genes' velocities, keeping metadata.
vdata = AnnData(adata.layers[vkey][:, tmp_filter])
vdata.obs = adata.obs.copy()
vdata.var = adata.var[tmp_filter].copy()
if 'highly_variable' in vdata.var.keys():
# force a clean bool dtype (may arrive as object after subsetting/IO)
vdata.var['highly_variable'] = np.array(vdata.var['highly_variable'], dtype=bool)
import scanpy as sc
# scanpy pipeline on the velocity matrix: PCA -> kNN -> louvain.
logg.switch_verbosity('off', module='scanpy')
sc.pp.pca(vdata, n_comps=20, svd_solver='arpack')
sc.pp.neighbors(vdata, n_pcs=20)
sc.tl.louvain(vdata, resolution=.7 if resolution is None else resolution)
logg.switch_verbosity('on', module='scanpy')
# Order louvain clusters by their mean value of `sort_by` (computing
# velocity_pseudotime on demand) so labels increase along the trajectory.
if sort_by == 'velocity_pseudotime' and sort_by not in adata.obs.keys():
    velocity_pseudotime(adata, vkey=vkey)
if sort_by in vdata.obs.keys():
    vc = vdata.obs['louvain']
    vc_cats = vc.cat.categories
    # mean sort key per cluster, in category order
    mean_times = [np.mean(vdata.obs[sort_by][vc == cat]) for cat in vc_cats]
    # `reorder_categories(..., inplace=True)` was deprecated in pandas 1.3
    # and removed in pandas 2.0; assign the reordered Series back instead.
    vdata.obs['louvain'] = vc.cat.reorder_categories(
        vc_cats[np.argsort(mean_times)]
    )
# Optionally relabel clusters against an existing categorical annotation
# (the matching loop continues past this view).
if isinstance(match_with, str) and match_with in adata.obs.keys():
from .utils import most_common_in_list
vc = vdata.obs['louvain']
# Per-cell robustness score for RNA velocity: compares velocities computed on
# the full data against velocities recomputed on a subsample (or on a
# user-provided `adata_subset`), writing `<vkey>_score_robustness` to
# `adata.obs`. NOTE(review): the trailing `return` of this function (visible
# in the duplicate copy above) lies past the end of this view; body
# indentation was lost in extraction, so code is kept byte-identical.
def score_robustness(
data, adata_subset=None, fraction=0.5, vkey="velocity", copy=False
):
# work on a copy only when requested
adata = data.copy() if copy else data
if adata_subset is None:
# No subset supplied: subsample the cells and rerun the
# neighbors -> moments -> velocity pipeline on the subsample.
from ..preprocessing.moments import moments
from ..preprocessing.neighbors import neighbors
from .velocity import velocity
logg.switch_verbosity("off")
adata_subset = adata.copy()
subset = random_subsample(adata_subset, fraction=fraction, return_subset=True)
neighbors(adata_subset)
moments(adata_subset)
velocity(adata_subset, vkey=vkey)
logg.switch_verbosity("on")
else:
# Caller supplied the subset; recover its boolean mask by name.
subset = adata.obs_names.isin(adata_subset.obs_names)
V = adata[subset].layers[vkey]
V_subset = adata_subset.layers[vkey]
# All-NaN float array shaped like `subset` (nan * {0, 1} is always nan;
# the comparison only fixes shape/dtype).
score = np.nan * (subset == False)
# Cosine-style agreement between full and subsampled velocity per cell.
score[subset] = prod_sum_var(V, V_subset) / (norm(V) * norm(V_subset))
adata.obs[f"{vkey}_score_robustness"] = score