# `make_dense` is a scvelo-internal helper that densifies a possibly sparse layer.
def get_reads(adata, key='fit', scaled=True, use_raw=False):
    # fall back to raw counts if the smoothed moments ('Ms'/'Mu') are absent
    if 'Ms' not in adata.layers.keys():
        use_raw = True
    s = make_dense(adata.layers['spliced' if use_raw else 'Ms'])
    u = make_dense(adata.layers['unspliced' if use_raw else 'Mu'])
    if scaled:  # undo the per-gene scaling applied during parameter fitting
        u /= adata.var[f'{key}_scaling'].values
    return u, s
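
# Usage sketch (hedged): after scvelo's standard preprocessing and dynamics
# recovery, which create the 'Ms'/'Mu' layers and the 'fit_scaling' column,
# get_reads returns matched unspliced/spliced matrices.
import scvelo as scv

adata = scv.datasets.pancreas()      # example dataset bundled with scvelo
scv.pp.filter_and_normalize(adata)
scv.pp.moments(adata)                # adds the smoothed 'Ms'/'Mu' layers
scv.tl.recover_dynamics(adata)       # adds 'fit_scaling' to adata.var

u, s = get_reads(adata, key='fit', scaled=True)
print(u.shape, s.shape)              # each (n_obs, n_vars)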
# DynamicsRecovery.__init__ from scvelo's dynamical model (shown without its
# enclosing class statement; `make_dense` and `get_connectivities` are
# scvelo-internal helpers).
def __init__(self, adata=None, gene=None, u=None, s=None, use_raw=False,
             load_pars=None, fit_scaling=False, fit_time=True,
             fit_switching=True, fit_steady_states=True, fit_alpha=True,
             fit_connected_states=True):
    super(DynamicsRecovery, self).__init__(adata.n_obs)
    _layers = adata[:, gene].layers
    self.gene = gene
    self.use_raw = use_raw = use_raw or 'Ms' not in _layers.keys()

    # extract actual data
    if u is None or s is None:
        u = make_dense(_layers['unspliced']) if use_raw else make_dense(_layers['Mu'])
        s = make_dense(_layers['spliced']) if use_raw else make_dense(_layers['Ms'])
    self.s, self.u = s, u

    # set weights for fitting (exclude dropouts and extreme outliers)
    nonzero = np.ravel(s > 0) & np.ravel(u > 0)
    s_filter = np.ravel(s < np.percentile(s[nonzero], 98))
    u_filter = np.ravel(u < np.percentile(u[nonzero], 98))
    self.weights = s_filter & u_filter & nonzero

    self.fit_scaling = fit_scaling
    self.fit_time = fit_time
    self.fit_alpha = fit_alpha
    self.fit_switching = fit_switching
    self.fit_steady_states = fit_steady_states
    self.connectivities = get_connectivities(adata) if fit_connected_states is True else fit_connected_states

    if load_pars and 'fit_alpha' in adata.var.keys():
        self.load_pars(adata, gene)  # assumed continuation; the source cuts off at the `if`
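
# What the weight mask above does, on synthetic counts (a minimal,
# self-contained sketch; names are illustrative):
import numpy as np

rng = np.random.default_rng(0)
u = rng.poisson(2.0, size=500).astype(float)
s = rng.poisson(5.0, size=500).astype(float)
s[:3] = 500.0  # inject a few extreme outliers

nonzero = np.ravel(s > 0) & np.ravel(u > 0)
s_filter = np.ravel(s < np.percentile(s[nonzero], 98))
u_filter = np.ravel(u < np.percentile(u[nonzero], 98))
weights = s_filter & u_filter & nonzero
print(weights.sum(), "of", weights.size, "cells kept for fitting")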
# Per-gene dynamics plotting loop from scvelo; the fragment begins
# mid-expression, so the head of this assignment is reconstructed (assumed:
# coerce a single gene name to a list, then keep only genes present in adata).
var_names = (
    [var_names] if isinstance(var_names, str)
    else [name for name in var_names if name in adata.var_names]
)
figsize = rcParams["figure.figsize"]
ncols = len(var_names)
for i, gs in enumerate(
    pl.GridSpec(
        1, ncols, pl.figure(None, (figsize[0] * ncols, figsize[1]), dpi=dpi)
    )
):
    idx = np.where(adata.var_names == var_names[i])[0][0]
    alpha, ut, st = compute_dynamics(adata, idx)
    # use an explicit time key from .obs if given, else the fitted latent time
    t = (
        adata.obs[xkey]
        if xkey in adata.obs.keys()
        else make_dense(adata.layers["fit_t"][:, idx])
    )
    idx_sorted = np.argsort(t)
    t = t[idx_sorted]
    ax = pl.subplot(gs)
    _kwargs = {"alpha": 0.3, "title": "", "xlabel": "time", "ylabel": "counts"}
    _kwargs.update(kwargs)
    linewidth = 1 if linewidth is None else linewidth
    ykey = [ykey] if isinstance(ykey, str) else ykey
    for j, key in enumerate(ykey):
        if key in adata.layers:
            y = make_dense(adata.layers[key][:, idx])[idx_sorted]
            ax = scatter(x=t, y=y, color=colors[j], ax=ax, show=False, **_kwargs)
        if key == "unspliced":
            # assumed continuation (source cuts off): overlay the fitted curve
            ax.plot(t, ut[idx_sorted], color=colors[j], linewidth=linewidth)
# Generalized least-squares fit from scvelo's optimization module: per gene,
# solve for gamma (and offsets) on two stacked, residual-whitened problems.
# The enclosing signature is reconstructed from the names used below.
def leastsq_generalized(x, y, x2, y2, res_std=None, res2_std=None,
                        fit_offset=False, fit_offset2=False):
    n_obs, n_var = x.shape
    offset, offset_ss = (
        np.zeros(n_var, dtype="float32"),
        np.zeros(n_var, dtype="float32"),
    )
    gamma = np.ones(n_var, dtype="float32")
    if (res_std is None) or (res2_std is None):
        res_std, res2_std = np.ones(n_var), np.ones(n_var)
    ones, zeros = np.ones(n_obs), np.zeros(n_obs)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # stack the two problems, each whitened by its residual std
        x, y = (
            np.vstack((make_dense(x) / res_std, x2 / res2_std)),
            np.vstack((make_dense(y) / res_std, y2 / res2_std)),
        )
    if fit_offset and fit_offset2:
        for i in range(n_var):
            # design matrix: [offset column, steady-state offset column, x]
            A = np.c_[
                np.vstack(
                    (np.c_[ones / res_std[i], zeros], np.c_[zeros, ones / res2_std[i]])
                ),
                x[:, i],
            ]
            # normal equations via pseudo-inverse: (A'A)^+ A'y
            offset[i], offset_ss[i], gamma[i] = np.linalg.pinv(A.T.dot(A)).dot(
                A.T.dot(y[:, i])
            )
    elif fit_offset:
        for i in range(n_var):
            A = np.c_[np.hstack((ones / res_std[i], zeros)), x[:, i]]
            # assumed continuation (source cuts off): solve as in the branch above
            offset[i], gamma[i] = np.linalg.pinv(A.T.dot(A)).dot(A.T.dot(y[:, i]))
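
# The per-gene solve above is ordinary least squares via the normal
# equations; a tiny self-contained check on synthetic data:
import numpy as np

rng = np.random.default_rng(1)
n = 200
x = rng.normal(size=n)
y = 0.5 + 2.0 * x + rng.normal(scale=0.1, size=n)  # true offset 0.5, slope 2.0

A = np.c_[np.ones(n), x]  # design matrix: [offset column, x]
offset, gamma = np.linalg.pinv(A.T.dot(A)).dot(A.T.dot(y))
print(round(offset, 2), round(gamma, 2))  # ~0.5, ~2.0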
# Velocity.__init__ from scvelo's steady-state model (shown without its
# enclosing class statement).
def __init__(self, adata=None, Ms=None, Mu=None, groups_for_fit=None,
             groupby=None, residual=None, constrain_ratio=None, min_r2=.01,
             r2_adjusted=True, use_raw=False):
    self._adata = adata
    # prefer explicitly passed matrices; otherwise use the raw or smoothed layers
    self._Ms = Ms if Ms is not None else adata.layers['spliced' if use_raw else 'Ms']
    self._Mu = Mu if Mu is not None else adata.layers['unspliced' if use_raw else 'Mu']
    self._Ms, self._Mu = make_dense(self._Ms), make_dense(self._Mu)
    n_obs, n_vars = self._Ms.shape
    self._residual, self._residual2 = residual, None
    self._offset = np.zeros(n_vars, dtype=np.float32)
    self._offset2 = np.zeros(n_vars, dtype=np.float32)
    self._gamma = np.zeros(n_vars, dtype=np.float32)
    self._r2 = np.zeros(n_vars, dtype=np.float32)
    self._beta = np.ones(n_vars, dtype=np.float32)
    self._velocity_genes = np.ones(n_vars, dtype=bool)
    self._groups_for_fit = groups_to_bool(adata, groups_for_fit, groupby)
    self._constrain_ratio = constrain_ratio
    self._r2_adjusted = r2_adjusted
    self._min_r2 = min_r2
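
# The class above backs scvelo's steady-state velocity fit; it is normally
# driven through the public API. A hedged sketch:
import scvelo as scv

adata = scv.datasets.pancreas()
scv.pp.filter_and_normalize(adata)
scv.pp.moments(adata)
scv.tl.velocity(adata, mode="deterministic")   # per-gene gamma fit
print(adata.var["velocity_gamma"].head())
print(adata.var["velocity_genes"].sum(), "genes kept as velocity genes")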
# Later revision of DynamicsRecovery.__init__ (shown without its enclosing
# class statement).
def __init__(self, adata=None, gene=None, u=None, s=None, use_raw=False,
             perc=99, max_iter=10, fit_time=True, fit_scaling=True,
             fit_steady_states=True, fit_connected_states=True,
             fit_basal_transcription=None, high_pars_resolution=False,
             steady_state_prior=None, init_vals=None):
    self.s, self.u, self.use_raw = None, None, None
    _layers = adata[:, gene].layers
    self.gene = gene
    self.use_raw = use_raw or 'Ms' not in _layers.keys()

    # extract actual data
    if u is None or s is None:
        u = _layers['unspliced'] if self.use_raw else _layers['Mu']
        s = _layers['spliced'] if self.use_raw else _layers['Ms']
    self.s, self.u = make_dense(s), make_dense(u)

    # basal transcription: shift counts so the minimum maps to zero
    if fit_basal_transcription:
        self.u0, self.s0 = np.min(self.u), np.min(self.s)
        self.u -= self.u0
        self.s -= self.s0
    else:
        self.u0, self.s0 = 0, 0

    # parameters and state, filled in during fitting
    self.alpha, self.beta, self.gamma = None, None, None
    self.scaling, self.t_, self.alpha_ = None, None, None
    self.u0_, self.s0_ = None, None
    self.weights, self.weights_outer, self.weights_upper = None, None, None
    self.t, self.tau, self.o, self.tau_ = None, None, None, None
    self.likelihood, self.loss, self.pars = None, None, None

    self.max_iter = max_iter
    # partition to a total of 5 fitting procedures
    # (t_ and alpha, scaling, rates, t_, all together)
    self.simplex_kwargs = {
        'method': 'Nelder-Mead',
        'options': {'maxiter': int(self.max_iter / 5)},
    }
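
# simplex_kwargs is passed to scipy's minimizer during fitting; a minimal
# sketch of that pattern (the loss here is a stand-in, not scvelo's):
import numpy as np
from scipy.optimize import minimize

simplex_kwargs = {'method': 'Nelder-Mead', 'options': {'maxiter': 2}}

def loss(params):                       # stand-in for the model's fitting loss
    alpha, gamma = params
    return (alpha - 2.0) ** 2 + (gamma - 0.3) ** 2

res = minimize(loss, x0=np.array([1.0, 1.0]), **simplex_kwargs)
print(res.x)                            # moves toward (2.0, 0.3)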