for var, fun in funs.items():
if var != FVAL and not getattr(self.history_options,
f'trace_record_{var}'):
continue
for it in range(5):
x_full = xfull(start.history.get_x_trace(it))
val = getattr(start.history, f'get_{var}_trace')(it)
if np.all(np.isnan(val)):
continue
if var in [FVAL, CHI2]:
assert np.isclose(
val, fun(x_full),
), var
elif var in [RES]:
# note that we can expect slight deviations here, since this res is
# computed without sensitivities, while the result here may be computed
# with sensitivities activated. If this fails too often, increase
# atol/rtol.
assert np.allclose(
val, fun(x_full),
rtol=1e-3, atol=1e-4
), var
elif var in [SRES]:
assert np.allclose(
val, fun(x_full)[:, self.problem.x_free_indices],
), var
elif var in [GRAD, SCHI2]:
assert np.allclose(
val, self.problem.get_reduced_vector(fun(x_full)),
), var
# WLS = np.sum([optimal_surrogate[i]['fun'] for i in range(len(optimal_surrogate))])
# WLS = np.sqrt(WLS)
WLS = compute_WLS(optimal_surrogate, self.problem, edatas, rdatas)
print(f'cost function: {WLS}')
# TODO: gradient computation
# if sensi_order > 0:
#     snllh = compute_snllh(edatas, rdatas, optimal_scalings, obj.x_ids,
#                           obj.mapping_par_opt_to_par_sim, obj.dim)
# TODO: compute FIM or HESS
# TODO: RES, SRES should also be possible, right?
return {
FVAL: WLS,
GRAD: snllh,
HESS: s2nllh,
RES: res,
SRES: sres,
RDATAS: rdatas
}
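# Hypothetical stand-in mirroring only the commented-out computation above
# (the real compute_WLS also receives the problem, edatas and rdatas):
# sum the surrogate objective values and take the square root.
import numpy as np

def compute_wls_from_surrogates(optimal_surrogate):
    wls = sum(entry['fun'] for entry in optimal_surrogate)
    return np.sqrt(wls)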
opt_sres = sim_sres_to_opt_sres(
x_ids,
par_sim_ids,
condition_map_sim_var,
rdata['sres'],
coefficient=1.0
)
sres = np.vstack([sres, opt_sres]) if sres.size else opt_sres
ret = {
FVAL: nllh,
CHI2: chi2,
GRAD: snllh,
HESS: s2nllh,
RES: res,
SRES: sres,
RDATAS: rdatas
}
return {
key: val
for key, val in ret.items()
if val is not None
}
# initialize res and sres
if RES in rvals[0]:
res = np.asarray(rvals[0][RES])
else:
res = None
if SRES in rvals[0]:
sres = np.asarray(rvals[0][SRES])
else:
sres = None
# skip the first entry (already used for initialization) and stack the rest
for rval in rvals[1:]:
if res is not None:
res = np.hstack([res, np.asarray(rval[RES])])
if sres is not None:
sres = np.vstack([sres, np.asarray(rval[SRES])])
# fill res, sres into result
if res is not None:
result[RES] = res
if sres is not None:
result[SRES] = sres
return result
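# Plain-numpy illustration of the aggregation above (shapes made up):
# residual vectors are concatenated with hstack, sensitivity matrices are
# stacked row-wise with vstack.
import numpy as np

res_1, res_2 = np.zeros(3), np.ones(2)              # 3 and 2 residuals
sres_1, sres_2 = np.zeros((3, 4)), np.ones((2, 4))  # 4 parameters each

res_all = np.hstack([res_1, res_2])     # shape (5,)
sres_all = np.vstack([sres_1, sres_2])  # shape (5, 4)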
if sensi_orders == (0,):
    if self.sres is True:
        res = self.res(x)[0]  # res callable returns (res, sres)
    else:
        res = self.res(x)
    result = {RES: res}
elif sensi_orders == (1,):
if self.sres is True:
sres = self.res(x)[1]
else:
sres = self.sres(x)
result = {SRES: sres}
elif sensi_orders == (0, 1):
if self.sres is True:
res, sres = self.res(x)
else:
res = self.res(x)
sres = self.sres(x)
result = {RES: res,
SRES: sres}
else:
raise ValueError("These sensitivity orders are not supported.")
return result
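# Toy example (purely illustrative) of a residual callable following the
# `sres is True` convention handled above: a single call returns both the
# residuals and their sensitivities, accessed as res(x)[0] and res(x)[1].
import numpy as np

def res_and_sres(x):
    x = np.asarray(x, dtype=float)
    res = x - 1.0           # residuals
    sres = np.eye(x.size)   # residual sensitivities (Jacobian)
    return res, sres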
Returns values indicative of an error, i.e. with NaN entries in all
vectors and a function value (the nllh) of `np.inf`.
"""
if not amici_model.nt():
nt = sum([data.nt() for data in edatas])
else:
nt = sum([data.nt() if data.nt() else amici_model.nt()
for data in edatas])
n_res = nt * amici_model.nytrue
return {
FVAL: np.inf,
GRAD: np.nan * np.ones(dim),
HESS: np.nan * np.ones([dim, dim]),
RES: np.nan * np.ones(n_res),
SRES: np.nan * np.ones([n_res, dim]),
RDATAS: rdatas
}
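# Minimal sketch of the shapes produced above, with made-up sizes and plain
# string keys standing in for the FVAL/GRAD/... constants: there are
# n_res = nt * nytrue residuals, and an error result is recognizable by its
# non-finite function value.
import numpy as np

nt, nytrue, dim = 10, 3, 4
n_res = nt * nytrue
error_output = {
    'fval': np.inf,
    'grad': np.nan * np.ones(dim),
    'hess': np.nan * np.ones([dim, dim]),
    'res': np.nan * np.ones(n_res),
    'sres': np.nan * np.ones([n_res, dim]),
}
assert not np.isfinite(error_output['fval'])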
def extract_values(mode: str,
result: ResultDict,
options: HistoryOptions) -> Dict:
"""Extract values to record from result."""
ret = dict()
ret_vars = [FVAL, GRAD, HESS, RES, SRES, CHI2, SCHI2]
for var in ret_vars:
if options.get(f'trace_record_{var}', True) and var in result:
ret[var] = result[var]
# fill in values that were not set yet, using alternative computations
if mode == MODE_RES:
res_result = result.get(RES, None)
sres_result = result.get(SRES, None)
chi2 = res_to_chi2(res_result)
schi2 = sres_to_schi2(res_result, sres_result)
fim = sres_to_fim(sres_result)
alt_values = {CHI2: chi2, SCHI2: schi2, HESS: fim}
if schi2 is not None:
alt_values[GRAD] = 0.5 * schi2
for var, val in alt_values.items():
if val is not None:
ret[var] = ret.get(var, val)
# set everything missing to NaN
for var in ret_vars:
    if var not in ret:
        ret[var] = np.NaN
return ret
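# Sketch of the least-squares identities behind the alternative values
# above, assuming chi2 is the sum of squared residuals and the objective is
# 0.5 * chi2 (which is what the `0.5 * schi2` gradient above implies):
import numpy as np

res = np.array([0.5, -1.0, 2.0])   # residuals
sres = np.array([[1.0, 0.0],       # residual sensitivities (3 x 2)
                 [0.5, 1.0],
                 [0.0, 2.0]])

chi2 = res @ res          # sum of squared residuals
schi2 = 2.0 * res @ sres  # gradient of chi2
grad = 0.5 * schi2        # gradient of 0.5 * chi2, as used above
fim = sres.T @ sres       # Fisher information, used as HESS above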
# num_threads=min(n_threads, len(edatas)),
#)
sy = [rdata['sy'] for rdata in rdatas]
snllh = self.inner_solver.calculate_gradients(self.inner_problem,
x_inner_opt,
sim,
sy,
parameter_mapping,
x_ids,
amici_model,
snllh)
return {FVAL: nllh,
GRAD: snllh,
HESS: s2nllh,
RES: res,
SRES: sres,
RDATAS: rdatas
}
def as_ndarrays(
result: Dict
) -> Dict:
"""
Convert all array_like objects to np.ndarray. This yields a uniform
output datatype, which offers various methods for working with the data.
"""
keys = [GRAD, HESS, RES, SRES]
for key in keys:
if key in result:
value = result[key]
if value is not None:
result[key] = np.array(value)
return result
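# Usage example for as_ndarrays, assuming the GRAD/SRES/FVAL constants from
# the snippets above are in scope as the corresponding dictionary keys:
import numpy as np

result = {GRAD: [0.1, -0.2], SRES: [[1.0, 0.0], [0.0, 1.0]], FVAL: 3.2}
result = as_ndarrays(result)
assert isinstance(result[GRAD], np.ndarray)
assert isinstance(result[SRES], np.ndarray)
assert result[FVAL] == 3.2  # keys outside GRAD/HESS/RES/SRES stay untouched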
used_time = time.time() - self._start_time
# create table row
row = pd.Series(name=len(self._trace),
index=self._trace.columns,
dtype='object')
values = {
TIME: used_time,
N_FVAL: self._n_fval,
N_GRAD: self._n_grad,
N_HESS: self._n_hess,
N_RES: self._n_res,
N_SRES: self._n_sres,
FVAL: ret[FVAL],
RES: ret[RES],
SRES: ret[SRES],
CHI2: ret[CHI2],
HESS: ret[HESS],
}
for var, val in values.items():
row[(var, float('nan'))] = val
for var, val in {X: x, GRAD: ret[GRAD], SCHI2: ret[SCHI2]}.items():
if var == X or self.options[f'trace_record_{var}']:
row[var] = val
else:
row[(var, float('nan'))] = np.NaN
self._trace = self._trace.append(row)
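# Standalone sketch of the column layout assumed above: scalar quantities
# live under (name, NaN), vector quantities expand over a second level.
# pd.concat replaces DataFrame.append, which is deprecated in recent pandas.
import numpy as np
import pandas as pd

columns = pd.MultiIndex.from_tuples(
    [('time', np.nan), ('fval', np.nan), ('x', 0), ('x', 1)]
)
trace = pd.DataFrame(columns=columns)

row = pd.Series(name=len(trace), index=columns, dtype='object')
row[('time', np.nan)] = 0.1
row[('fval', np.nan)] = 42.0
row['x'] = np.array([1.0, 2.0])  # fills ('x', 0) and ('x', 1)

trace = pd.concat([trace, row.to_frame().T])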