# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_ftrl_fit_predict_nones():
    """Fitting and predicting on `None` inputs should be no-ops that
    yield a `None` prediction frame."""
    ft = Ftrl()
    ft.fit(None, None)
    df_target = ft.predict(None)
    # `is None` is the idiomatic identity check (PEP 8), not `== None`
    assert df_target is None
def test_ftrl_wrong_validation_target_type():
    """A validation target whose ltype differs from the training target's
    must make `fit()` raise a TypeError with a descriptive message.

    Removed dead locals from the original: `nepochs_validation = 56` was
    shadowed by the explicit keyword argument `nepochs_validation = 0`,
    and the `res =` assignment was never read (the call raises anyway).
    """
    nepochs = 1234
    nbins = 78
    ft = Ftrl(alpha = 0.5, nbins = nbins, nepochs = nepochs)
    r = range(ft.nbins)
    df_X = dt.Frame(r)
    df_y = dt.Frame(r)
    df_X_val = df_X
    # String validation target vs. integer training target triggers the mismatch
    df_y_val = dt.Frame(["Some string data" for _ in r])
    with pytest.raises(TypeError) as e:
        ft.fit(df_X, df_y, df_X_val, df_y_val,
               nepochs_validation = 0)
    assert ("Training and validation target columns must have the same ltype, "
            "got: `integer` and `string`" == str(e.value))
@pytest.mark.parametrize('target',
                         [[True, False],
                          ["yes", "no"],
                          [20, 10],
                          [0.5, -0.5]])
def test_ftrl_fit_predict_bool_binomial(target):
    """Binomial FTRL trained on a two-row boolean frame should separate the
    two classes almost perfectly, whatever two-valued target column is used."""
    ft = Ftrl(alpha = 0.1, nepochs = 10000, model_type = "binomial")
    df_train = dt.Frame([True, False])
    df_target = dt.Frame(target)
    ft.fit(df_train, df_target)
    df_res = ft.predict(df_train)
    assert ft.labels[:, 0].to_list() == [sorted(target)]
    assert ft.model_type_trained == "binomial"
    # Row 0 (True) should score ~1 for the second label column and ~0 for the
    # first; row 1 (False) the other way around.
    assert 1 - epsilon <= df_res[0, 1] <= 1
    assert 0 <= df_res[1, 1] < epsilon
    assert 0 <= df_res[0, 0] < epsilon
    assert 1 - epsilon <= df_res[1, 0] <= 1
def test_ftrl_early_stopping_multinomial():
    """Multinomial FTRL with early stopping must leave the model usable, and
    each predicted row's class probabilities must sum to one.

    Fixes from the original: it computed `delta_sum` but asserted nothing;
    `zip(sum_p, [1] * 5)` silently truncated the 7 prediction rows to 5; and
    `labels`, `res`, `p_none`, `p_dict` were unused locals.
    """
    nepochs = 2000
    ft = Ftrl(alpha = 0.2, nepochs = nepochs, double_precision = True)
    df_train = dt.Frame(["cucumber", None, "shift", "sky", "day", "orange",
                         "ocean"])
    df_target = dt.Frame(["green", "red", "red", "blue", "green", None,
                          "blue"])
    ft.fit(df_train, df_target, df_train[:4, :], df_target[:4, :],
           nepochs_validation = 1, validation_error = 1e-3)
    frame_integrity_check(ft.model)
    p = ft.predict(df_train)
    frame_integrity_check(p)
    p_list = p.to_list()
    # `p_list` is column-major; zipping its columns yields one tuple of class
    # probabilities per prediction row.
    sum_p = [sum(row) for row in zip(*p_list)]
    delta_sum = [abs(s - 1) for s in sum_p]
    assert len(delta_sum) == df_train.nrows
    # Double-precision model: row sums should be 1 up to tiny rounding error.
    assert max(delta_sum) < 1e-6
def test_ftrl_fit_predict_int():
    """FTRL trained on an integer column with a boolean target must behave
    as a binomial model and classify both training rows with near-certainty."""
    ft = Ftrl(alpha = 0.1, nepochs = 10000)
    df_train = dt.Frame([[0, 1]])
    df_target = dt.Frame([[True, False]])
    ft.fit(df_train, df_target)
    df_target = ft.predict(df_train[:, 0])
    assert ft.model_type_trained == "binomial"
    # Row 0 maps to the positive class (probability ~1), row 1 to ~0.
    assert 1 - epsilon <= df_target[0, 1] <= 1
    assert 0 <= df_target[1, 1] < epsilon
def test_ftrl_construct_wrong_nbins_value():
    """Constructing Ftrl with a non-positive `nbins` must raise ValueError."""
    expected = "Argument `nbins` in Ftrl() constructor should be positive: 0"
    with pytest.raises(ValueError) as e:
        noop(Ftrl(nbins = 0))
    assert str(e.value) == expected
def test_ftrl_set_individual_after_params():
    """Capturing `params`, then setting one attribute directly, must not
    mutate the captured snapshot, and the refreshed params must mirror the
    current attribute values."""
    ft = Ftrl()
    params = ft.params
    ft.alpha = tparams.alpha
    params_new = ft.params
    # The snapshot taken before the mutation still equals the defaults.
    assert params == Ftrl().params
    # A namedtuple compares equal to the plain tuple of its fields.
    fields = (params_new.alpha, params_new.beta, params_new.lambda1,
              params_new.lambda2, params_new.nbins, params_new.mantissa_nbits,
              params_new.nepochs, params_new.double_precision,
              params_new.negative_class, params_new.interactions,
              params_new.model_type)
    assert fields == params_new
    attrs = (ft.alpha, ft.beta, ft.lambda1, ft.lambda2, ft.nbins,
             ft.mantissa_nbits, ft.nepochs, ft.double_precision,
             ft.negative_class, ft.interactions, ft.model_type)
    assert attrs == params_new
def test_ftrl_construct_wrong_lambda1_value():
    """A negative `lambda1` in the constructor must raise ValueError."""
    expected = ("Argument `lambda1` in Ftrl() constructor should be greater "
                "than or equal to zero: -1.0")
    with pytest.raises(ValueError) as e:
        noop(Ftrl(lambda1 = -1.0))
    assert str(e.value) == expected
def test_ftrl_construct_wrong_params_name():
    """Passing a params namedtuple that lacks a required field must raise an
    AttributeError naming the first missing field."""
    WrongParams = collections.namedtuple("WrongParams", ["alpha", "lambda1"])
    wrong_params = WrongParams(alpha = 1, lambda1 = 0.01)
    with pytest.raises(AttributeError) as e:
        Ftrl(wrong_params)
    assert str(e.value) == ("'WrongParams' object has no attribute "
                           "'double_precision'")
def test_ftrl_pickling_empty_model():
    """An untrained Ftrl must round-trip through pickle with empty state
    and default parameters."""
    ft_pickled = pickle.dumps(Ftrl())
    ft_unpickled = pickle.loads(ft_pickled)
    # `is None` is the idiomatic identity comparison (PEP 8), not `== None`
    assert ft_unpickled.model is None
    assert ft_unpickled.feature_importances is None
    assert ft_unpickled.params == Ftrl().params