best_of_size[current_size] = [tuples[x] for x in chosen_tuples]
if verbose:
    print('Best of size ', current_size, ':')

# for each candidate anchor:
#   update precision, lower and upper bounds until precision constraints are met
#   update best anchor if coverage is larger than current best coverage
stop_this = False
for i, t in zip(chosen_tuples, best_of_size[current_size]):

    # choose at most (beam_size - 1) tuples at each step, with at most n_features steps
    beta = np.log(1. / (delta / (1 + (beam_size - 1) * n_features)))
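
    # note (assumed reading): dividing delta by 1 + (beam_size - 1) * n_features
    # is a union-bound correction over the maximum number of candidates examined,
    # so the KL confidence bounds below hold jointly with probability >= 1 - delta.
    # illustrative arithmetic with assumed values delta=0.1, beam_size=4,
    # n_features=10: beta = log(1 / (0.1 / 31)) = log(310) ~ 5.74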

    # get precision, lower and upper bounds, and coverage for the candidate anchor
    mean = state['t_positives'][t] / state['t_nsamples'][t]
    lb = AnchorBaseBeam.dlow_bernoulli(mean, beta / state['t_nsamples'][t])
    ub = AnchorBaseBeam.dup_bernoulli(mean, beta / state['t_nsamples'][t])
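    # dlow_bernoulli / dup_bernoulli invert the Bernoulli KL divergence, in the
    # spirit of the KL-LUCB bounds (Kaufmann & Kalyanakrishnan, 2013):
    # roughly, lb = min{q <= mean : KL(mean, q) <= beta / n} with n the sample
    # count, and symmetrically for ub; see the standalone sketch after this loop
    # for one way such a bound can be computed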
    coverage = state['t_coverage'][t]
    if verbose:
        print(i, mean, lb, ub)

    # while (prec(A) >= tau and prec_lb(A) < tau - eps) or (prec(A) < tau and prec_ub(A) >= tau + eps):
    # sample more data and update the lower and upper precision bounds ...
    # ... because respectively either prec_lb(A) or prec_ub(A) needs to improve
    while ((mean >= desired_confidence and lb < desired_confidence - epsilon_stop) or
           (mean < desired_confidence and ub >= desired_confidence + epsilon_stop)):
        # sample a batch of data, then recompute precision, lb and ub
        sample_fns[i](batch_size)
        mean = state['t_positives'][t] / state['t_nsamples'][t]
        lb = AnchorBaseBeam.dlow_bernoulli(mean, beta / state['t_nsamples'][t])
        ub = AnchorBaseBeam.dup_bernoulli(mean, beta / state['t_nsamples'][t])
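
# ---------------------------------------------------------------------------
# For reference: a minimal, self-contained sketch of how a KL-inversion upper
# bound like dup_bernoulli can be computed by bisection on the Bernoulli KL
# divergence. This is an illustrative re-implementation under assumed
# behaviour, not the library routine itself; it assumes numpy is imported as
# np, as elsewhere in this snippet.
# ---------------------------------------------------------------------------

def kl_bernoulli(p: float, q: float) -> float:
    # KL(Bernoulli(p) || Bernoulli(q)), with clipping to avoid log(0)
    eps = 1e-15
    p = min(max(p, eps), 1. - eps)
    q = min(max(q, eps), 1. - eps)
    return p * np.log(p / q) + (1. - p) * np.log((1. - p) / (1. - q))


def dup_bernoulli_sketch(p: float, level: float, n_iter: int = 17) -> float:
    # largest q >= p with KL(p, q) <= level, located by bisection;
    # KL(p, q) is increasing in q on [p, 1], so bisection applies
    lo, hi = p, 1.
    for _ in range(n_iter):
        q = (lo + hi) / 2.
        if kl_bernoulli(p, q) > level:
            hi = q
        else:
            lo = q
    return lo

# example with hypothetical numbers: an empirical precision of 0.9 at
# level beta / n = 0.05 yields an upper bound of roughly 0.97
# dup_bernoulli_sketch(0.9, 0.05)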