def __init__(self, fill_method="mean", min_value=None, max_value=None):
"""
Possible values for fill_method:
"zero": fill missing entries with zeros
"mean": fill with column means
"median" : fill with column medians
"min": fill with min value per column
"random": fill with gaussian noise according to mean/std of column
"""
Solver.__init__(
self,
fill_method=fill_method,
min_value=min_value,
max_value=max_value)
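For context, a minimal NumPy-only sketch of what the fill strategies listed above amount to; the standalone simple_fill function is illustrative, not the library's implementation:

import numpy as np

def simple_fill(X, fill_method="mean"):
    """Column-wise fill of NaN entries, mirroring the fill_method options above."""
    X = np.asarray(X, dtype=float).copy()
    for j in range(X.shape[1]):
        col = X[:, j]
        missing = np.isnan(col)
        if not missing.any():
            continue
        observed = col[~missing]
        if fill_method == "zero":
            col[missing] = 0.0
        elif fill_method == "mean":
            col[missing] = observed.mean()
        elif fill_method == "median":
            col[missing] = np.median(observed)
        elif fill_method == "min":
            col[missing] = observed.min()
        elif fill_method == "random":
            # Gaussian noise matched to the observed mean/std of the column
            col[missing] = np.random.normal(
                loc=observed.mean(), scale=observed.std(), size=missing.sum())
    return X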
min_value : float
Smallest possible imputed value
max_value : float
Largest possible imputed value
error_tolerance : float
Degree of error allowed on reconstructed values. If omitted then
defaults to 0.0001
max_iters : int
Maximum number of iterations for the convex solver
verbose : bool
Print debug info
"""
Solver.__init__(
self,
min_value=min_value,
max_value=max_value)
self.require_symmetric_solution = require_symmetric_solution
self.error_tolerance = error_tolerance
self.max_iters = max_iters
self.verbose = verbose
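For context, a hedged sketch of the underlying idea: complete the matrix by minimizing its nuclear norm subject to approximately matching the observed entries, here written with cvxpy. The nuclear_norm_complete function is illustrative only; the class's actual solver setup may differ:

import numpy as np
import cvxpy as cp

def nuclear_norm_complete(X_incomplete, error_tolerance=0.0001):
    """Fill missing entries by minimizing the nuclear norm of the completed
    matrix while (approximately) matching the observed entries."""
    observed_mask = (~np.isnan(X_incomplete)).astype(float)
    X_observed = np.nan_to_num(X_incomplete)  # zeros where entries are missing
    S = cp.Variable(X_incomplete.shape)
    objective = cp.Minimize(cp.norm(S, "nuc"))
    constraints = [
        cp.abs(cp.multiply(observed_mask, S - X_observed)) <= error_tolerance
    ]
    problem = cp.Problem(objective, constraints)
    problem.solve()
    return S.value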
dropout_probability=0,
batch_size=32,
l1_penalty=0,
l2_penalty=0,
recurrent_weight=0.5,
n_burn_in_epochs=1,
missing_input_noise_weight=0,
output_history_size=25,
patience_epochs=100,
min_improvement=0.999,
max_training_epochs=None,
init_fill_method="zero",
min_value=None,
max_value=None,
verbose=True):
Solver.__init__(
self,
fill_method=init_fill_method,
min_value=min_value,
max_value=max_value)
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.hidden_layer_sizes = hidden_layer_sizes
self.optimizer = optimizer
self.dropout_probability = dropout_probability
self.batch_size = batch_size
self.l1_penalty = l1_penalty
self.l2_penalty = l2_penalty
self.recurrent_weight = recurrent_weight
self.n_burn_in_epochs = n_burn_in_epochs
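These parameters appear to configure an autoencoder imputer whose reconstructions are blended back into the missing entries between epochs (the recurrent_weight and n_burn_in_epochs arguments). A rough Keras sketch of that loop follows, with illustrative defaults; tanh, linear, and adam stand in for the hidden_activation, output_activation, and optimizer arguments, and this is not the class's exact training procedure:

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

def train_autoencoder_imputer(X_incomplete, hidden_layer_sizes=(64, 64),
                              dropout_probability=0.0, recurrent_weight=0.5,
                              n_burn_in_epochs=1, max_training_epochs=100,
                              batch_size=32):
    """Train a dense autoencoder on a zero-filled matrix and repeatedly
    blend its reconstructions back into the missing entries."""
    missing_mask = np.isnan(X_incomplete)
    X_filled = np.nan_to_num(X_incomplete)
    n_features = X_filled.shape[1]

    model = Sequential()
    for size in hidden_layer_sizes:
        model.add(Dense(size, activation="tanh"))
        if dropout_probability > 0:
            model.add(Dropout(dropout_probability))
    model.add(Dense(n_features, activation="linear"))
    model.compile(optimizer="adam", loss="mse")

    for epoch in range(max_training_epochs):
        model.fit(X_filled, X_filled, batch_size=batch_size, epochs=1, verbose=0)
        if epoch >= n_burn_in_epochs:
            X_pred = model.predict(X_filled, verbose=0)
            # Blend the network's reconstruction into the missing entries only.
            X_filled[missing_mask] = (
                recurrent_weight * X_pred[missing_mask]
                + (1 - recurrent_weight) * X_filled[missing_mask])
    return X_filled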
def __init__(
self,
rank=10,
learning_rate=0.001,
epochs=10000,
patience=5,
l2_penalty=1e-5,
use_bias=True,
min_improvement=0.001,
optimization_algorithm="nadam",
loss='mse',
validation_frac=0.1,
min_value=None,
max_value=None,
normalizer=Scaler(),
verbose=True):
Solver.__init__(
self,
min_value=min_value,
max_value=max_value,
normalizer=normalizer)
self.rank = rank
self.learning_rate = learning_rate
self.epochs = epochs
self.patience = patience
self.l2_penalty = l2_penalty
self.use_bias = use_bias
self.optimization_algorithm = optimization_algorithm
self.loss = loss
self.validation_frac = validation_frac
self.min_improvement = min_improvement
self.normalizer = normalizer
self.verbose = verbose
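A NumPy-only sketch of the low-rank factorization these parameters describe, using plain gradient descent on the observed entries in place of the nadam optimizer and Scaler normalization referenced above; illustrative and unoptimized, not the class's implementation:

import numpy as np

def factorize_and_impute(X_incomplete, rank=10, learning_rate=0.001,
                         epochs=10000, l2_penalty=1e-5, use_bias=True):
    """Fit X ~ U @ V.T (+ row/column biases) on the observed entries by
    gradient descent, then fill the missing entries from the model."""
    n_rows, n_cols = X_incomplete.shape
    observed = ~np.isnan(X_incomplete)
    rows, cols = np.where(observed)
    values = X_incomplete[rows, cols]

    rng = np.random.default_rng(0)
    U = 0.1 * rng.standard_normal((n_rows, rank))
    V = 0.1 * rng.standard_normal((n_cols, rank))
    row_bias = np.zeros(n_rows)
    col_bias = np.zeros(n_cols)
    global_mean = values.mean()

    for _ in range(epochs):
        pred = (U[rows] * V[cols]).sum(axis=1) + global_mean
        if use_bias:
            pred += row_bias[rows] + col_bias[cols]
        err = pred - values
        # Gradient step on the squared error plus an L2 penalty on the factors.
        grad_U = err[:, None] * V[cols] + l2_penalty * U[rows]
        grad_V = err[:, None] * U[rows] + l2_penalty * V[cols]
        np.subtract.at(U, rows, learning_rate * grad_U)
        np.subtract.at(V, cols, learning_rate * grad_V)
        if use_bias:
            np.subtract.at(row_bias, rows, learning_rate * err)
            np.subtract.at(col_bias, cols, learning_rate * err)

    X_filled = X_incomplete.copy()
    full_pred = U @ V.T + global_mean
    if use_bias:
        full_pred += row_bias[:, None] + col_bias[None, :]
    X_filled[~observed] = full_pred[~observed]
    return X_filled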
def __init__(
self,
rank=10,
convergence_threshold=0.00001,
max_iters=200,
gradual_rank_increase=True,
svd_algorithm="arpack",
init_fill_method="zero",
min_value=None,
max_value=None,
verbose=True):
Solver.__init__(
self,
fill_method=init_fill_method,
min_value=min_value,
max_value=max_value)
self.rank = rank
self.max_iters = max_iters
self.svd_algorithm = svd_algorithm
self.convergence_threshold = convergence_threshold
self.gradual_rank_increase = gradual_rank_increase
self.verbose = verbose
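A short sketch of the iterative-SVD idea behind these parameters: alternate a rank-k reconstruction (here via SciPy's ARPACK-backed svds, matching svd_algorithm="arpack") with re-filling the missing entries until the imputed values stop changing. Illustrative only; gradual_rank_increase is omitted:

import numpy as np
from scipy.sparse.linalg import svds

def iterative_svd_impute(X_incomplete, rank=10, max_iters=200,
                         convergence_threshold=1e-5):
    """Alternate a rank-k SVD reconstruction with re-filling the missing
    entries until convergence."""
    missing = np.isnan(X_incomplete)
    X_filled = np.where(missing, 0.0, X_incomplete)  # zero initial fill
    for _ in range(max_iters):
        U, s, Vt = svds(X_filled, k=rank)
        X_reconstructed = U @ np.diag(s) @ Vt
        old_missing_values = X_filled[missing]
        X_filled[missing] = X_reconstructed[missing]
        change = np.abs(X_filled[missing] - old_missing_values).mean()
        if change < convergence_threshold:
            break
    return X_filled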
add_ones : boolean
Whether to add a constant column of ones. Defaults to True.
n_nearest_columns : int
Number of other columns to use to estimate current column.
Useful when number of columns is huge.
Default is to use all columns.
init_fill_method : str
Valid values: {"mean", "median", or "random"}
(the latter meaning fill with random samples from the observed
values of a column)
verbose : boolean
"""
Solver.__init__(
self,
n_imputations=n_imputations,
min_value=min_value,
max_value=max_value,
fill_method=init_fill_method)
self.visit_sequence = visit_sequence
self.n_burn_in = n_burn_in
self.n_pmm_neighbors = n_pmm_neighbors
self.impute_type = impute_type
self.model = model
self.add_ones = add_ones
self.n_nearest_columns = n_nearest_columns
self.verbose = verbose
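A compact sketch of the chained-equations loop these parameters drive: visit each incomplete column, regress it on the other columns, and overwrite its missing entries, repeating for a burn-in number of rounds. The mice_like_impute function is illustrative and omits visit_sequence handling, PMM neighbor matching, and the add_ones column (LinearRegression fits an intercept instead):

import numpy as np
from sklearn.linear_model import LinearRegression

def mice_like_impute(X_incomplete, n_burn_in=10):
    """One chain of a MICE-style imputation over all incomplete columns."""
    missing = np.isnan(X_incomplete)
    # Initial fill with column means so every regression has complete inputs.
    col_means = np.nanmean(X_incomplete, axis=0)
    X_filled = np.where(missing, col_means, X_incomplete)

    for _ in range(n_burn_in):
        for j in range(X_filled.shape[1]):
            miss_j = missing[:, j]
            if not miss_j.any():
                continue
            other_cols = np.delete(np.arange(X_filled.shape[1]), j)
            model = LinearRegression()
            model.fit(X_filled[~miss_j][:, other_cols], X_filled[~miss_j, j])
            X_filled[miss_j, j] = model.predict(X_filled[miss_j][:, other_cols])
    return X_filled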
def __init__(self,
missing_values=np.nan,
imputation_order='ascending',
n_iter=10,
predictor=None,
sample_posterior=False,
n_nearest_features=None,
initial_strategy="mean",
min_value=None,
max_value=None,
verbose=False,
random_state=None):
Solver.__init__(
self,
min_value=min_value,
max_value=max_value)
self.n_iter = n_iter
self.missing_values = missing_values
self.imputation_order = imputation_order
self.predictor = predictor
self.sample_posterior = sample_posterior
self.n_nearest_features = n_nearest_features
self.initial_strategy = initial_strategy
self.verbose = verbose
self.random_state = random_state
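Typical usage, assuming this is the fancyimpute IterativeImputer and that the Solver base class exposes fit_transform as in recent releases (both assumptions; only the constructor arguments shown are taken from the signature above):

import numpy as np
from fancyimpute import IterativeImputer  # import path assumed

X_incomplete = np.array([
    [1.0,    2.0,    np.nan],
    [3.0,    np.nan, 6.0],
    [np.nan, 8.0,    9.0],
    [10.0,   11.0,   12.0],
])

imputer = IterativeImputer(n_iter=10, initial_strategy="mean", random_state=0)
X_filled = imputer.fit_transform(X_incomplete)  # fit_transform assumed from the Solver base class
print(X_filled)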