# PropensityEvaluator initializer (shown without its enclosing class): it only accepts
# propensity-based models, i.e. instances of PropensityEstimator.
def __init__(self, estimator):
    """
    Args:
        estimator (PropensityEstimator): The propensity model to evaluate.
    """
    if not isinstance(estimator, PropensityEstimator):
        raise TypeError("PropensityEvaluator should be initialized with PropensityEstimator, "
                        "got ({}) instead.".format(type(estimator)))
    super(PropensityEvaluator, self).__init__(estimator)
import warnings
import pandas as pd
from .base_estimator import PopulationOutcomeEstimator
from .base_weight import PropensityEstimator
from ..utils.stat_utils import robust_lookup
# TODO: implement two-sided caliper truncation: a lower-bound truncation epsilon and an upper-bound one.
class IPW(PropensityEstimator, PopulationOutcomeEstimator):
    """
    Causal model implementing inverse probability (propensity score) weighting:
    w_i = 1 / Pr[A=a_i | X_i]
    """
    def __init__(self, learner, truncate_eps=None, use_stabilized=False):
        """
        Args:
            learner: Initialized sklearn model.
            truncate_eps (None|float): Optional value between 0 and 0.5 used to clip the propensity estimates.
                Probabilities will be clipped into the range [truncate_eps, 1 - truncate_eps].
            use_stabilized (bool): Whether to re-weigh the learned weights with the prevalence of the treatment.
                See Also: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4351790/#S6title
        """
        super(IPW, self).__init__(learner, use_stabilized)
        self.truncate_eps = truncate_eps  # keep the clipping threshold; otherwise the argument would be silently dropped
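
# A minimal, self-contained sketch (not the library's own fit/weight code) of how the
# w_i = 1 / Pr[A=a_i | X_i] formula, the truncate_eps clipping, and the stabilizing
# prevalence factor documented above translate into plain sklearn/pandas. Every name
# below is made up for illustration.
def _example_ipw_weights(truncate_eps=0.05, use_stabilized=True):
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(size=(500, 3)), columns=["x1", "x2", "x3"])
    a = pd.Series((X["x1"] + rng.normal(size=500) > 0).astype(int))          # synthetic treatment assignment

    probs = LogisticRegression().fit(X, a).predict_proba(X)                  # columns: Pr[A=0|X], Pr[A=1|X]
    prob_of_actual = pd.Series(probs[np.arange(len(a)), a], index=a.index)   # Pr[A=a_i | X_i] per individual
    prob_of_actual = prob_of_actual.clip(truncate_eps, 1 - truncate_eps)     # two-sided truncation

    weights = 1.0 / prob_of_actual                                           # w_i = 1 / Pr[A=a_i | X_i]
    if use_stabilized:
        weights = weights * a.map(a.value_counts(normalize=True))            # multiply by treatment prevalence Pr[A=a_i]
    return weights
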
# The initializer below appears to be the PropensityEstimator base-class constructor
# (imported above from .base_weight; note the super(PropensityEstimator, ...) call).
# It is shown here because it enforces that the wrapped learner can output probabilities.
def __init__(self, learner, use_stabilized=False, *args, **kwargs):
    """
    Args:
        learner: Initialized sklearn model.
        use_stabilized (bool): Whether to re-weigh the learned weights with the prevalence of the treatment.
            See Also: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4351790/#S6title
    """
    super(PropensityEstimator, self).__init__(learner, use_stabilized=use_stabilized)
    if not hasattr(self.learner, "predict_proba"):
        raise AttributeError("Propensity Estimator must use a machine learning model that can predict "
                             "probabilities (i.e., have a predict_proba method).")
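
# Quick, self-contained illustration of the predict_proba guard above using standard
# sklearn estimators only (a usage sketch, not part of the library):
def _example_probabilistic_learner_check():
    from sklearn.linear_model import LinearRegression, LogisticRegression

    assert hasattr(LogisticRegression(), "predict_proba")        # probabilistic classifier: accepted
    assert not hasattr(LinearRegression(), "predict_proba")      # plain regressor: would raise AttributeError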