# NOTE: scraper/extraction residue (advertising banner), preserved as a comment so it cannot break the module:
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
ub = AnchorBaseBeam.dup_bernoulli(mean, beta / state['t_nsamples'][t])
coverage = state['t_coverage'][t]
if verbose:
print(i, mean, lb, ub)
# while prec(A) >= tau and prec_lb(A) < tau - eps or prec(A) < tau and prec_ub(A) > tau + eps
# sample more data and update lower and upper precision bounds ...
# ... b/c respectively either prec_lb(A) or prec(A) needs to improve
while ((mean >= desired_confidence and lb < desired_confidence - epsilon_stop) or
(mean < desired_confidence and ub >= desired_confidence + epsilon_stop)):
# sample a batch of data, get new precision, lb and ub values
sample_fns[i](batch_size)
mean = state['t_positives'][t] / state['t_nsamples'][t]
lb = AnchorBaseBeam.dlow_bernoulli(mean, beta / state['t_nsamples'][t])
ub = AnchorBaseBeam.dup_bernoulli(mean, beta / state['t_nsamples'][t])
if verbose:
print('%s mean = %.2f lb = %.2f ub = %.2f coverage: %.2f n: %d' %
(t, mean, lb, ub, coverage, state['t_nsamples'][t]))
# if prec(A) > tau and prec_lb(A) > tau - eps then we found an eligible anchor
if mean >= desired_confidence and lb > desired_confidence - epsilon_stop:
if verbose:
print('Found eligible anchor ', t, 'Coverage:',
coverage, 'Is best?', coverage > best_coverage)
# coverage eligible anchor needs to be bigger than current best coverage
if coverage > best_coverage:
best_coverage = coverage
best_tuple = t
Returns
-------
Upper and lower precision bound indices.
"""
sorted_means = np.argsort(means) # ascending sort of anchor candidates by precision
beta = AnchorBaseBeam.compute_beta(n_features, t, delta)
# J = the beam width top anchor candidates with highest precision
# not_J = the rest
J = sorted_means[-top_n:]
not_J = sorted_means[:-top_n]
for f in not_J: # update upper bound for lowest precision anchor candidates
ub[f] = AnchorBaseBeam.dup_bernoulli(means[f], beta / n_samples[f])
for f in J: # update lower bound for highest precision anchor candidates
lb[f] = AnchorBaseBeam.dlow_bernoulli(means[f], beta / n_samples[f])
# for the low precision anchor candidates, compute the upper precision bound and keep the index ...
# ... of the anchor candidate with the highest upper precision value -> ut
# for the high precision anchor candidates, compute the lower precision bound and keep the index ...
# ... of the anchor candidate with the lowest lower precision value -> lt
ut = not_J[np.argmax(ub[not_J])]
lt = J[np.argmin(lb[J])]
return ut, lt