# Imports needed to run these tests. ERROR_THRESHOLD and the
# generate_regression_data pytest fixture are assumed to be provided by the
# surrounding test suite (e.g. tests/const.py and tests/conftest.py); an
# illustrative sketch of the fixture is given at the end of this file.
from xgboost import XGBRegressor

from causalml.inference.meta import (
    BaseRLearner,
    BaseRRegressor,
    BaseSRegressor,
    BaseTLearner,
    BaseTRegressor,
    BaseXLearner,
    BaseXRegressor,
    MLPTRegressor,
    XGBTRegressor,
)
from causalml.metrics import ape, gini

from .const import ERROR_THRESHOLD  # shared error tolerance for the ATE checks


def test_BaseSRegressor(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseSRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(
        X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5


def test_BaseTLearner(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseTLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5


def test_BaseRRegressor(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5


def test_BaseXRegressor(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseXRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5


def test_BaseRLearner(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5


def test_BaseXLearner(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseXLearner(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5


def test_BaseTRegressor(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseTRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5


def test_XGBTRegressor(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = XGBTRegressor()

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5


def test_MLPTRegressor(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = MLPTRegressor()

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(
        X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10
    )
    assert gini(tau, cate_p.flatten()) > .5
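

# ---------------------------------------------------------------------------
# The tests above depend on a `generate_regression_data` pytest fixture that is
# defined elsewhere in the test suite (typically conftest.py) and returns a
# callable producing (y, X, treatment, tau, b, e). The sketch below is only an
# illustration of what such a fixture could look like, assuming causalml's
# `synthetic_data` generator; the sample size, number of features, noise level,
# and random seed are illustrative choices, not the suite's actual settings.
# ---------------------------------------------------------------------------
import numpy as np
import pytest

from causalml.dataset import synthetic_data


@pytest.fixture
def generate_regression_data():
    def _generate_data():
        # Fix the seed so the accuracy assertions above are reproducible
        # (illustrative choice).
        np.random.seed(42)
        # mode=1 draws from the "nuisance and easy treatment" synthetic DGP and
        # returns outcome, features, treatment, true CATE, baseline outcome,
        # and propensity score, in that order.
        return synthetic_data(mode=1, n=10000, p=8, sigma=0.1)

    return _generate_data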