'''
__BIC_minimizer is used by the minimize function to get the sigma which gives the lowest overall BIC
'''
BIC_tot = 0
for stress in unique_stresses_f:
failure_current_stress_df = f_df[f_df['stress'] == stress]
FAILURES = failure_current_stress_df['times'].values
if right_censored is not None:
if stress in unique_stresses_rc:
right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
RIGHT_CENSORED = right_cens_current_stress_df['times'].values
else:
RIGHT_CENSORED = None
else:
RIGHT_CENSORED = None
normal_fit_common_shape = Fit_Normal_2P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False, force_sigma=common_shape_X)
BIC_tot += normal_fit_common_shape.BIC
return BIC_tot
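# Hedged usage sketch (an illustration, not the original calling code): the docstring above says
# __BIC_minimizer is handed to a minimize function to find the sigma with the lowest total BIC,
# presumably something along the lines of scipy.optimize.minimize, e.g.
#     from scipy.optimize import minimize
#     optimized_sigma = minimize(__BIC_minimizer, x0=common_shape_guess, method='nelder-mead')
#     common_shape = optimized_sigma.x[0]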
def LL(params, T_f, T_rc): # log likelihood function (2 parameter normal)
LL_f = 0
LL_rc = 0
LL_f += Fit_Normal_2P.logf(T_f, params[0], params[1]).sum() # failure times
LL_rc += Fit_Normal_2P.logR(T_rc, params[0], params[1]).sum() # right censored times
return -(LL_f + LL_rc)
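# Hedged sketch (an assumption, not necessarily the library's exact implementation): the logf and
# logR terms summed above would typically be the log-PDF and log-survival function of the normal
# distribution, written with autograd-compatible numpy so that hessian() can differentiate them:
#     import autograd.numpy as anp
#     from autograd.scipy.special import erf
#     def logf(t, mu, sigma):  # log of the normal PDF, applied to failure times
#         return -0.5 * ((t - mu) / sigma) ** 2 - anp.log(sigma * anp.sqrt(2 * anp.pi))
#     def logR(t, mu, sigma):  # log of the normal survival function, applied to right censored times
#         return anp.log(0.5 * (1 - erf((t - mu) / (sigma * anp.sqrt(2)))))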
self.distribution = Normal_Distribution(mu=self.mu, sigma=self.sigma)
# confidence interval estimates of parameters
Z = -ss.norm.ppf((1 - CI) / 2)
if force_sigma is None:
hessian_matrix = hessian(Fit_Normal_2P.LL)(np.array(tuple(params)), np.array(tuple(failures)), np.array(tuple(right_censored)))
covariance_matrix = np.linalg.inv(hessian_matrix)
self.mu_SE = abs(covariance_matrix[0][0]) ** 0.5
self.sigma_SE = abs(covariance_matrix[1][1]) ** 0.5
self.Cov_mu_sigma = abs(covariance_matrix[0][1])
self.mu_upper = self.mu + (Z * self.mu_SE) # these are unique to normal and lognormal mu params
self.mu_lower = self.mu + (-Z * self.mu_SE)
self.sigma_upper = self.sigma * (np.exp(Z * (self.sigma_SE / self.sigma)))
self.sigma_lower = self.sigma * (np.exp(-Z * (self.sigma_SE / self.sigma)))
else:
hessian_matrix = hessian(Fit_Normal_2P.LL_fs)(np.array(tuple([self.mu])), np.array(tuple(failures)), np.array(tuple(right_censored)), np.array(tuple([force_sigma])))
covariance_matrix = np.linalg.inv(hessian_matrix)
self.mu_SE = abs(covariance_matrix[0][0]) ** 0.5
self.sigma_SE = ''
self.Cov_mu_sigma = ''
self.mu_upper = self.mu + (Z * self.mu_SE) # these are unique to normal and lognormal mu params
self.mu_lower = self.mu + (-Z * self.mu_SE)
self.sigma_upper = ''
self.sigma_lower = ''
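# Note on the interval formulas in the force_sigma is None branch above: mu is unbounded, so its
# interval is the usual normal approximation mu +/- Z*SE(mu). sigma must remain positive, so its
# interval is built on the log scale, sigma*exp(+/-Z*SE(sigma)/sigma), giving multiplicative rather
# than additive bounds. For illustration, with sigma=10, sigma_SE=2 and Z=1.96 this gives roughly
# (10*exp(-0.392), 10*exp(0.392)) = (6.8, 14.8).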
Data = {'Parameter': ['Mu', 'Sigma'],
'Point Estimate': [self.mu, self.sigma],
'Standard Error': [self.mu_SE, self.sigma_SE],
'Lower CI': [self.mu_lower, self.sigma_lower],
'Upper CI': [self.mu_upper, self.sigma_upper]}
df = pd.DataFrame(Data, columns=['Parameter', 'Point Estimate', 'Standard Error', 'Lower CI', 'Upper CI'])
self.results = df.set_index('Parameter')
params = [self.mu, self.sigma]
k = len(params)
n = len(all_data)
LL2 = 2 * Fit_Normal_2P.LL(params, failures, right_censored)
self.loglik2 = LL2
if n - k - 1 > 0:
self.AICc = 2 * k + LL2 + (2 * k ** 2 + 2 * k) / (n - k - 1)
else:
self.AICc = 'Insufficient data'
self.BIC = np.log(n) * k + LL2
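# LL2 above is twice the negative log-likelihood (Fit_Normal_2P.LL returns -(LL_f + LL_rc)), so
# these expressions match the standard definitions
# AICc = 2*k - 2*loglik + (2*k**2 + 2*k)/(n - k - 1) and BIC = k*ln(n) - 2*loglik,
# with k = 2 fitted parameters and n = the total number of failures and right censored points.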
self.distribution = Normal_Distribution(mu=self.mu, sigma=self.sigma)
# confidence interval estimates of parameters
Z = -ss.norm.ppf((1 - CI) / 2)
if force_sigma is None:
hessian_matrix = hessian(Fit_Normal_2P.LL)(np.array(tuple(params)), np.array(tuple(failures)), np.array(tuple(right_censored)))
covariance_matrix = np.linalg.inv(hessian_matrix)
self.mu_SE = abs(covariance_matrix[0][0]) ** 0.5
self.sigma_SE = abs(covariance_matrix[1][1]) ** 0.5
self.Cov_mu_sigma = abs(covariance_matrix[0][1])
self.mu_upper = self.mu + (Z * self.mu_SE) # these are unique to normal and lognormal mu params
self.mu_lower = self.mu + (-Z * self.mu_SE)
self.sigma_upper = self.sigma * (np.exp(Z * (self.sigma_SE / self.sigma)))
self.sigma_lower = self.sigma * (np.exp(-Z * (self.sigma_SE / self.sigma)))
else:
hessian_matrix = hessian(Fit_Normal_2P.LL_fs)(np.array(tuple([self.mu])), np.array(tuple(failures)), np.array(tuple(right_censored)), np.array(tuple([force_sigma])))
covariance_matrix = np.linalg.inv(hessian_matrix)
self.mu_SE = abs(covariance_matrix[0][0]) ** 0.5
self.sigma_SE = ''
self.Cov_mu_sigma = ''
self.mu_upper = self.mu + (Z * self.mu_SE) # these are unique to normal and lognormal mu params
self.mu_lower = self.mu + (-Z * self.mu_SE)
plt.ylim([0.0001, 0.9999])
plt.gca().set_yscale('function', functions=(axes_transforms.normal_forward, axes_transforms.normal_inverse))
plt.grid(b=True, which='major', color='k', alpha=0.3, linestyle='-')
plt.grid(b=True, which='minor', color='k', alpha=0.08, linestyle='-')
plt.gca().yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))
ytickvals = [0.0001, 0.001, 0.01, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.97, 0.99, 0.999, 0.9999]
plt.yticks(ytickvals)
plt.gca().set_yticklabels(['{:,.2%}'.format(x) for x in ytickvals]) # formats y ticks as percentage
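# Hedged sketch of what the axis transforms above are assumed to do: normal_forward maps a
# probability to the standard normal quantile and normal_inverse maps it back, which is what
# straightens the fitted CDF on this probability plot. The y-limits stop short of 0 and 1 because
# the quantile function is infinite there. For example (an assumption, not the library's code):
#     import scipy.stats as ss
#     def normal_forward(F):  # probability -> standard normal quantile
#         return ss.norm.ppf(F)
#     def normal_inverse(q):  # standard normal quantile -> probability
#         return ss.norm.cdf(q)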
delta = max(x) - min(x)
xvals = np.linspace(min(x) - delta * 0.5, max(x) + delta * 0.5, 1000)
if __fitted_dist_params is not None:
mu = __fitted_dist_params.mu
sigma = __fitted_dist_params.sigma
else:
from reliability.Fitters import Fit_Normal_2P
fit = Fit_Normal_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
mu = fit.mu
sigma = fit.sigma
if 'label' in kwargs:
label = kwargs.pop('label')
else:
label = str('Fitted Normal_2P (μ=' + str(round_to_decimals(mu, dec)) + ', σ=' + str(round_to_decimals(sigma, dec)) + ')')
if 'color' in kwargs:
color = kwargs.pop('color')
data_color = color
else:
color = 'red'
data_color = 'k'
plt.scatter(x, y, marker='.', linewidth=2, c=data_color)
nf = Normal_Distribution(mu=mu, sigma=sigma).CDF(show_plot=False, xvals=xvals)
xrange = plt.gca().get_xlim() # this ensures the previously plotted objects are considered when setting the range
xrange_min = min(min(x) - delta * 0.2, xrange[0])
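# Hedged usage sketch (assuming only the keyword arguments that appear in the call to
# Probability_plotting.Normal_probability_plot further below; the data values are made up):
#     import matplotlib.pyplot as plt
#     from reliability.Probability_plotting import Normal_probability_plot
#     Normal_probability_plot(failures=[23, 35, 47, 58, 66], right_censored=[70, 85])
#     plt.show()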
self.__Expon_2P_params = Fit_Expon_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Expon_2P_lambda = self.__Expon_2P_params.Lambda
self.Expon_2P_gamma = self.__Expon_2P_params.gamma
self.Expon_2P_BIC = self.__Expon_2P_params.BIC
self.Expon_2P_AICc = self.__Expon_2P_params.AICc
self._parametric_CDF_Exponential_2P = self.__Expon_2P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Lognormal_3P_params = Fit_Lognormal_3P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Lognormal_3P_mu = self.__Lognormal_3P_params.mu
self.Lognormal_3P_sigma = self.__Lognormal_3P_params.sigma
self.Lognormal_3P_gamma = self.__Lognormal_3P_params.gamma
self.Lognormal_3P_BIC = self.__Lognormal_3P_params.BIC
self.Lognormal_3P_AICc = self.__Lognormal_3P_params.AICc
self._parametric_CDF_Lognormal_3P = self.__Lognormal_3P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Normal_2P_params = Fit_Normal_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Normal_2P_mu = self.__Normal_2P_params.mu
self.Normal_2P_sigma = self.__Normal_2P_params.sigma
self.Normal_2P_BIC = self.__Normal_2P_params.BIC
self.Normal_2P_AICc = self.__Normal_2P_params.AICc
self._parametric_CDF_Normal_2P = self.__Normal_2P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Lognormal_2P_params = Fit_Lognormal_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Lognormal_2P_mu = self.__Lognormal_2P_params.mu
self.Lognormal_2P_sigma = self.__Lognormal_2P_params.sigma
self.Lognormal_2P_BIC = self.__Lognormal_2P_params.BIC
self.Lognormal_2P_AICc = self.__Lognormal_2P_params.AICc
self._parametric_CDF_Lognormal_2P = self.__Lognormal_2P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Weibull_2P_params = Fit_Weibull_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Weibull_2P_alpha = self.__Weibull_2P_params.alpha
self.Weibull_2P_beta = self.__Weibull_2P_params.beta
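# Hedged illustration (not the original selection logic): the BIC values stored above could be
# compared to rank the candidate distributions, with the lowest BIC taken as the best fit:
#     candidates = {'Exponential_2P': self.Expon_2P_BIC, 'Lognormal_3P': self.Lognormal_3P_BIC,
#                   'Normal_2P': self.Normal_2P_BIC, 'Lognormal_2P': self.Lognormal_2P_BIC}
#     best_by_BIC = min(candidates, key=candidates.get)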
FAILURES = failure_current_stress_df['times'].values
len_f = len(FAILURES)
if right_censored is not None:
if stress in unique_stresses_rc:
right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
RIGHT_CENSORED = right_cens_current_stress_df['times'].values
len_rc = len(RIGHT_CENSORED)
else:
RIGHT_CENSORED = None
len_rc = 0
else:
RIGHT_CENSORED = None
len_rc = 0
weights_array.append(len_f + len_rc)
normal_fit = Fit_Normal_2P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False)
normal_fit_mu_array.append(normal_fit.mu)
normal_fit_sigma_array.append(normal_fit.sigma)
common_shape_guess = np.average(normal_fit_sigma_array)
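# Hedged note: weights_array built above holds the number of observations at each stress level, so
# a weighted estimate of the common sigma could be formed as, for example,
#     common_shape_weighted = np.average(normal_fit_sigma_array, weights=weights_array)
# which would correspond to the 'weighted_average' option referenced in the plot titles below.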
def __BIC_minimizer(common_shape_X):
'''
__BIC_minimizer is used by the minimize function to get the sigma which gives the lowest overall BIC
'''
BIC_tot = 0
for stress in unique_stresses_f:
failure_current_stress_df = f_df[f_df['stress'] == stress]
FAILURES = failure_current_stress_df['times'].values
if right_censored is not None:
if stress in unique_stresses_rc:
right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
RIGHT_CENSORED = right_cens_current_stress_df['times'].values
if force_sigma is None:
self.mu = params[0]
self.sigma = params[1]
else:
self.mu = params * 1 # the * 1 converts ndarray to float64
self.sigma = force_sigma
else:
self.success = False
print('WARNING: Fitting using Autograd FAILED for Normal_2P. The fit from Scipy was used instead so results may not be accurate.')
self.mu = sp[0]
self.sigma = sp[1]
params = [self.mu, self.sigma]
k = len(params)
n = len(all_data)
LL2 = 2 * Fit_Normal_2P.LL(params, failures, right_censored)
self.loglik2 = LL2
if n - k - 1 > 0:
self.AICc = 2 * k + LL2 + (2 * k ** 2 + 2 * k) / (n - k - 1)
else:
self.AICc = 'Insufficient data'
self.BIC = np.log(n) * k + LL2
self.distribution = Normal_Distribution(mu=self.mu, sigma=self.sigma)
# confidence interval estimates of parameters
Z = -ss.norm.ppf((1 - CI) / 2)
if force_sigma is None:
hessian_matrix = hessian(Fit_Normal_2P.LL)(np.array(tuple(params)), np.array(tuple(failures)), np.array(tuple(right_censored)))
covariance_matrix = np.linalg.inv(hessian_matrix)
self.mu_SE = abs(covariance_matrix[0][0]) ** 0.5
self.sigma_SE = abs(covariance_matrix[1][1]) ** 0.5
self.Cov_mu_sigma = abs(covariance_matrix[0][1])
# within this loop, each list of failures and right censored values will be unpacked for each unique stress and plotted as a probability plot as well as the CDF of the common sigma plot
AICc_total = 0
BIC_total = 0
AICc = True
for i, stress in enumerate(unique_stresses_f):
failure_current_stress_df = f_df[f_df['stress'] == stress]
FAILURES = failure_current_stress_df['times'].values
if right_censored is not None:
if stress in unique_stresses_rc:
right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
RIGHT_CENSORED = right_cens_current_stress_df['times'].values
else:
RIGHT_CENSORED = None
else:
RIGHT_CENSORED = None
normal_fit_common_shape = Fit_Normal_2P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False, force_sigma=common_shape)
normal_fit_mu_array_common_shape.append(normal_fit_common_shape.mu[0])
if type(normal_fit_common_shape.AICc) == str:
AICc = False
else:
AICc_total += normal_fit_common_shape.AICc
BIC_total += normal_fit_common_shape.BIC
if show_plot is True:
normal_fit_common_shape.distribution.CDF(linestyle='--', color=color_list[i], xvals=xvals)
Probability_plotting.Normal_probability_plot(failures=FAILURES, right_censored=RIGHT_CENSORED, color=color_list[i], label=str(stress))
plt.legend(title='Stress')
plt.xlim(xmin, xmax)
if common_shape_method == 'BIC':
plt.title(str('ALT Normal Probability Plot\nOptimal BIC ' + r'$\sigma$ = ' + str(round(common_shape, 4))))
elif common_shape_method == 'weighted_average':
plt.title(str('ALT Normal Probability Plot\nWeighted average ' + r'$\sigma$ = ' + str(round(common_shape, 4))))
elif common_shape_method == 'average':