Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- Fit_Beta_2P: input validation and maximum-likelihood fitting (fragment of a larger method) ---
# NOTE(review): this chunk arrived with flattened indentation; the structure below is
# reconstructed from the if/else statements — confirm against the original file.
# NOTE(review): the list -> ndarray conversion for `failures` presumably happens just
# above this fragment (it exists for `right_censored` below) — verify in the full file.
if type(failures) != np.ndarray:  # NOTE(review): isinstance() would be the idiomatic check and also accept ndarray subclasses
    raise TypeError('failures must be a list or array of failure data')
if type(right_censored) == list:
    # accept a plain list by converting it to an array
    right_censored = np.array(right_censored)
if type(right_censored) != np.ndarray:
    raise TypeError('right_censored must be a list or array of right censored failure data')
# pool failures and right-censored observations for the range check and sample count
all_data = np.hstack([failures, right_censored])
if min(all_data) < 0 or max(all_data) > 1:
    # the Beta distribution has support only on [0, 1]
    raise ValueError('All data must be between 0 and 1 to use the beta distribution.')
bnds = [(0.0001, None), (0.0001, None)]  # bounds of solution (keeps alpha > 0 and beta > 0)
# solve it
self.gamma = 0  # Beta_2P has no location shift
sp = ss.beta.fit(all_data, floc=0, fscale=1, optimizer='powell')  # scipy's answer is used as an initial guess. Scipy is only correct when there is no censored data
guess = [sp[0], sp[1]]
# minimize the negative log-likelihood (Fit_Beta_2P.LL); value_and_grad supplies the
# jacobian (jac=True), and censored observations are handled inside LL itself
result = minimize(value_and_grad(Fit_Beta_2P.LL), guess, args=(failures, right_censored), jac=True, bounds=bnds, tol=1e-6)
if result.success is True:
    params = result.x
    self.success = True
    self.alpha = params[0]
    self.beta = params[1]
else:
    # optimizer failed: fall back to the (censoring-unaware) scipy estimate and warn
    self.success = False
    warnings.warn('Fitting using Autograd FAILED for Beta_2P. The fit from Scipy was used instead so results may not be accurate.')
    self.alpha = sp[0]
    self.beta = sp[1]
params = [self.alpha, self.beta]
k = len(params)  # number of fitted parameters
n = len(all_data)  # total number of observations (failures + right censored)
LL2 = 2 * Fit_Beta_2P.LL(params, failures, right_censored)  # -2 * log-likelihood, used for AICc/BIC
def LL(params, T_f, T_rc):
    """Negative log-likelihood for the 2-parameter Beta distribution.

    Sums ``Fit_Beta_2P.logf`` over the failure times ``T_f`` and
    ``Fit_Beta_2P.logR`` over the right-censored times ``T_rc`` using
    ``params = (alpha, beta)``, then returns the negated total so the
    result can be minimized directly.
    """
    alpha, beta = params[0], params[1]
    failure_term = Fit_Beta_2P.logf(T_f, alpha, beta).sum()  # failure times
    censored_term = Fit_Beta_2P.logR(T_rc, alpha, beta).sum()  # right censored times
    return -(failure_term + censored_term)
# --- Beta_2P probability plot (fragment) ---
# NOTE(review): flattened indentation reconstructed from the if/else structure — confirm
# against the original file.
# plotting positions (x = data, y = estimated CDF) for the scatter of the raw data
x, y = plotting_positions(failures=failures, right_censored=right_censored, h1=h1, h2=h2)
plt.ylim([0.0001, 0.9999])
plt.xlim([-0.1, 1.1])  # Beta support is [0, 1]; small margin either side
# NOTE(review): matplotlib renamed grid's `b` parameter to `visible` (deprecated 3.5,
# removed 3.7) — confirm the pinned matplotlib version still accepts `b=`.
plt.grid(b=True, which='major', color='k', alpha=0.3, linestyle='-')
plt.grid(b=True, which='minor', color='k', alpha=0.08, linestyle='-')
# minor y ticks every 2% across the probability axis
plt.gca().yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))
ytickvals = [0.001, 0.01, 0.025, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.975, 0.99, 0.999]
plt.yticks(ytickvals)
plt.gca().set_yticklabels(['{:,.1%}'.format(x) for x in ytickvals])  # formats y ticks as percentage
xvals = np.linspace(0, 1, 1000)  # grid on which the fitted CDF is evaluated
if __fitted_dist_params is not None:
    # caller already fitted the distribution — reuse its parameters
    alpha = __fitted_dist_params.alpha
    beta = __fitted_dist_params.beta
else:
    # fit here; import is local to avoid a circular import with reliability.Fitters
    from reliability.Fitters import Fit_Beta_2P
    fit = Fit_Beta_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
    alpha = fit.alpha
    beta = fit.beta
if 'label' in kwargs:
    label = kwargs.pop('label')
else:
    # default legend label includes the fitted parameters rounded for display
    label = str('Fitted Beta_2P (α=' + str(round_to_decimals(alpha, dec)) + ', β=' + str(round_to_decimals(beta, dec)) + ')')
if 'color' in kwargs:
    # user-specified color is used for both the fitted line and the data points
    color = kwargs.pop('color')
    data_color = color
else:
    color = 'red'
    data_color = 'k'
plt.scatter(x, y, marker='.', linewidth=2, c=data_color)
if show_fitted_distribution is True:
    # evaluate the fitted CDF (no plot) and build the forward axis transform
    bf = Beta_Distribution(alpha=alpha, beta=beta).CDF(show_plot=False, xvals=xvals)
    f_beta = lambda x: axes_transforms.beta_forward(x, alpha, beta)
def LL(params, T_f, T_rc):
    """Negative log-likelihood for the 2-parameter Beta distribution.

    Combines ``Fit_Beta_2P.logf`` summed over the failure times ``T_f``
    with ``Fit_Beta_2P.logR`` summed over the right-censored times
    ``T_rc`` (``params`` holding alpha and beta), and negates the sum so
    minimizers can be applied to it directly.
    """
    total = Fit_Beta_2P.logf(T_f, params[0], params[1]).sum()  # failure times
    total += Fit_Beta_2P.logR(T_rc, params[0], params[1]).sum()  # right censored times
    return -total
# --- Fit_Everything: Gamma_2P, Exponential_1P and Beta_2P candidate fits (fragment) ---
# NOTE(review): flattened indentation reconstructed from the if/else structure; the DATA
# dict literal at the end is cut off mid-expression by the chunk boundary.
# fit Gamma_2P and record its parameters and goodness-of-fit criteria
self.__Gamma_2P_params = Fit_Gamma_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Gamma_2P_alpha = self.__Gamma_2P_params.alpha
self.Gamma_2P_beta = self.__Gamma_2P_params.beta
self.Gamma_2P_BIC = self.__Gamma_2P_params.BIC
self.Gamma_2P_AICc = self.__Gamma_2P_params.AICc
self._parametric_CDF_Gamma_2P = self.__Gamma_2P_params.distribution.CDF(xvals=d, show_plot=False)
# fit Exponential_1P (single parameter: Lambda)
self.__Expon_1P_params = Fit_Expon_1P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Expon_1P_lambda = self.__Expon_1P_params.Lambda
self.Expon_1P_BIC = self.__Expon_1P_params.BIC
self.Expon_1P_AICc = self.__Expon_1P_params.AICc
self._parametric_CDF_Exponential_1P = self.__Expon_1P_params.distribution.CDF(xvals=d, show_plot=False)
if max(failures) <= 1:
    # the Beta distribution is only defined on [0, 1], so only attempt the fit
    # when the failure data fall within that support
    # NOTE(review): right_censored is not range-checked here — presumably Fit_Beta_2P
    # validates it internally; confirm.
    self.__Beta_2P_params = Fit_Beta_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
    self.Beta_2P_alpha = self.__Beta_2P_params.alpha
    self.Beta_2P_beta = self.__Beta_2P_params.beta
    self.Beta_2P_BIC = self.__Beta_2P_params.BIC
    self.Beta_2P_AICc = self.__Beta_2P_params.AICc
    self._parametric_CDF_Beta_2P = self.__Beta_2P_params.distribution.CDF(xvals=d, show_plot=False)
else:
    # zero placeholders so the results dataframe below still has Beta_2P entries
    self.Beta_2P_alpha = 0
    self.Beta_2P_beta = 0
    self.Beta_2P_BIC = 0
    self.Beta_2P_AICc = 0
# assemble the output dataframe
# (dict literal continues beyond this fragment; empty strings mark parameters a
# distribution does not have)
DATA = {'Distribution': ['Weibull_3P', 'Weibull_2P', 'Normal_2P', 'Exponential_1P', 'Exponential_2P', 'Lognormal_2P', 'Lognormal_3P', 'Gamma_2P', 'Gamma_3P', 'Beta_2P'],
        'Alpha': [self.Weibull_3P_alpha, self.Weibull_2P_alpha, '', '', '', '', '', self.Gamma_2P_alpha, self.Gamma_3P_alpha, self.Beta_2P_alpha],
        'Beta': [self.Weibull_3P_beta, self.Weibull_2P_beta, '', '', '', '', '', self.Gamma_2P_beta, self.Gamma_3P_beta, self.Beta_2P_beta],
        'Gamma': [self.Weibull_3P_gamma, '', '', '', self.Expon_2P_gamma, '', self.Lognormal_3P_gamma, '', self.Gamma_3P_gamma, ''],
# --- Fit_Beta_2P: goodness-of-fit criteria and confidence intervals (fragment) ---
# NOTE(review): flattened indentation reconstructed from the if/else structure.
params = [self.alpha, self.beta]
k = len(params)  # number of estimated parameters
n = len(all_data)  # total observations (failures + right censored)
LL2 = 2 * Fit_Beta_2P.LL(params, failures, right_censored)  # -2 * log-likelihood
self.loglik2 = LL2
if n - k - 1 > 0:
    # small-sample corrected Akaike Information Criterion (AICc)
    self.AICc = 2 * k + LL2 + (2 * k ** 2 + 2 * k) / (n - k - 1)
else:
    # the correction term would divide by zero (or go negative) for tiny samples
    self.AICc = 'Insufficient data'
self.BIC = np.log(n) * k + LL2  # Bayesian Information Criterion
self.distribution = Beta_Distribution(alpha=self.alpha, beta=self.beta)
# confidence interval estimates of parameters
Z = -ss.norm.ppf((1 - CI) / 2)  # two-sided standard-normal quantile for the requested CI
# observed Fisher information: hessian of the negative log-likelihood at the MLE,
# inverted to give the parameter covariance matrix
hessian_matrix = hessian(Fit_Beta_2P.LL)(np.array(tuple(params)), np.array(tuple(failures)), np.array(tuple(right_censored)))
covariance_matrix = np.linalg.inv(hessian_matrix)
self.alpha_SE = abs(covariance_matrix[0][0]) ** 0.5
self.beta_SE = abs(covariance_matrix[1][1]) ** 0.5
self.Cov_alpha_beta = abs(covariance_matrix[0][1])
# CI bounds are computed multiplicatively (log scale) so alpha and beta stay positive
self.alpha_upper = self.alpha * (np.exp(Z * (self.alpha_SE / self.alpha)))
self.alpha_lower = self.alpha * (np.exp(-Z * (self.alpha_SE / self.alpha)))
self.beta_upper = self.beta * (np.exp(Z * (self.beta_SE / self.beta)))
self.beta_lower = self.beta * (np.exp(-Z * (self.beta_SE / self.beta)))
# tabulate point estimates, standard errors and CI bounds for display
Data = {'Parameter': ['Alpha', 'Beta'],
        'Point Estimate': [self.alpha, self.beta],
        'Standard Error': [self.alpha_SE, self.beta_SE],
        'Lower CI': [self.alpha_lower, self.beta_lower],
        'Upper CI': [self.alpha_upper, self.beta_upper]}
df = pd.DataFrame(Data, columns=['Parameter', 'Point Estimate', 'Standard Error', 'Lower CI', 'Upper CI'])
self.results = df.set_index('Parameter')