# Imports for this snippet; LDIFWriter's exact export location may vary by bonsai version.
from io import StringIO
from base64 import b64decode
from bonsai import LDAPEntry, LDIFWriter


def test_write_entry():
    """ Test serialising an LDAP entry. """
    ent = LDAPEntry("cn=test")
    ent["cn"] = "test"
    ent["jpegPhoto"] = b"1223122130008283938282931232"
    ent["sn"] = "test😊"
    ent["sn"].append(" test2")
    with StringIO() as out:
        ldif = LDIFWriter(out, max_length=32)
        ldif.write_entry(ent)
        content = out.getvalue()
    contlines = content.split("\n")
    # Non-ASCII surnames are serialised base64-encoded ("sn:: ..."), so decode them back.
    surnames = {
        b64decode(line.split(":: ")[1]).decode("UTF-8")
        for line in contlines
        if "sn" in line
    }
    jpeg_lines = []
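
# The assertions above rely on the LDIF convention (RFC 2849) that a value which
# is not a "safe" ASCII string is written as "attr:: <base64>" rather than
# "attr: <value>". A minimal sketch of that rule, independent of bonsai
# (ldif_value is a hypothetical helper, not part of the library):
from base64 import b64encode

def ldif_value(attr, value):
    """Emit one LDIF line, base64-encoding values that are not safe ASCII."""
    # Safe: printable ASCII, and not starting with space, ':' or '<'.
    is_safe = (all(0x20 <= b <= 0x7E for b in value)
               and value[:1] not in (b" ", b":", b"<"))
    if is_safe:
        return "{0}: {1}".format(attr, value.decode("ascii"))
    return "{0}:: {1}".format(attr, b64encode(value).decode("ascii"))

print(ldif_value("cn", b"test"))                   # cn: test
print(ldif_value("sn", "test😊".encode("utf-8")))  # sn:: dGVzdPCfmIo=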
from bonsai import LDAPClient, LDAPDN, LDAPSearchScope


def test_search_with_managedsait_ctrl(ipaddr):
    """ Test searching with manageDsaIT control. """
    refdn = LDAPDN("o=admin-ref,ou=nerdherd,dc=bonsai,dc=test")
    cli = LDAPClient("ldap://%s" % ipaddr)
    with cli.connect() as conn:
        # Without the control, the server chases the referral and
        # returns the entry it points at.
        res = conn.search(refdn, LDAPSearchScope.BASE, attrlist=["ref"])[0]
        assert str(res.dn) == "cn=admin,dc=bonsai,dc=test"
    cli.set_managedsait(True)
    with cli.connect() as conn:
        # With manageDsaIT set, the referral entry itself is returned.
        res = conn.search(refdn, LDAPSearchScope.BASE, attrlist=["ref"])[0]
        assert refdn == res.dn
        assert "ldap://bonsai.test/cn=admin,dc=bonsai,dc=test" == res["ref"][0]
# Shared imports for the PaloBoost/PaloForest snippets below; the bonsai-dt
# package paths are assumptions, and make_hastie_11_2 / make_friedman1_poly
# are project-local dataset helpers rather than scikit-learn functions.
import time

import numpy as np
from sklearn.datasets import make_friedman1, make_hastie_10_2
from sklearn.ensemble import (GradientBoostingClassifier,
                              GradientBoostingRegressor)
from sklearn.metrics import brier_score_loss, roc_auc_score
from sklearn.model_selection import train_test_split

from bonsai.ensemble.gbm import GBM                # assumed path
from bonsai.ensemble.paloboost import PaloBoost    # assumed path
from bonsai.ensemble.paloforest import PaloForest  # assumed path


def test_dumpload():
    X, y = make_friedman1(n_samples=10000, noise=5)
    n, m = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.5)
    model = PaloForest(distribution="gaussian",
                       n_estimators=100,
                       learning_rate=1.0,
                       max_depth=4,
                       subsample0=0.5)
    print("\n")
    print("# Test Dump/Load")
    print("-----------------------------------------------------")
    print(" model_name train_time predict_time rmse ")
    print("-----------------------------------------------------")
    print(" {0:12} {1:12} {2:12} {3:.5f}".format(
        "baseline", "-", "-", np.std(y_test)))
    # Fit
    start = time.time()
    model.fit(X_train, y_train)
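
# The snippet stops right after fitting. Given the test's name, the natural
# continuation is a serialise/reload round-trip; whether the project uses
# pickle or its own dump/load API is an assumption.
import pickle

def dump_load_roundtrip(model, X_test):
    """Reload a fitted model from bytes and check its predictions are unchanged."""
    model_loaded = pickle.loads(pickle.dumps(model))
    assert np.allclose(model.predict(X_test), model_loaded.predict(X_test))
    return model_loaded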
def test_dumpload():
    X, y = make_hastie_10_2(n_samples=10000)
    y[y < 0] = 0  # map Hastie's {-1, +1} labels to {0, 1} for the bernoulli loss
    n, m = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.5)
    model = PaloForest(distribution="bernoulli",
                       n_estimators=10,
                       learning_rate=1.0,
                       max_depth=4,
                       subsample0=0.5,
                       calibrate=True)
    print("\n")
    print("# Test Dump/Load")
    print("-----------------------------------------------------")
    print(" model_name train_time predict_time auc ")
    print("-----------------------------------------------------")
    print(" {0:12} {1:12} {2:12} {3:.5f}".format(
        "baseline", "-", "-", 0.5))
    # Fit
    start = time.time()
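    # The snippet is truncated here; the table header above suggests the body
    # goes on to time fit/predict and report test AUC. A sketch under that
    # assumption (not the project's exact code):
    model.fit(X_train, y_train)
    time_fit = time.time() - start

    start = time.time()
    y_hat = model.predict_proba(X_test)[:, 1]  # probability of the positive class
    time_pred = time.time() - start

    print(" {0:12} {1:12.5f} {2:12.5f} {3:.5f}".format(
        "palofrst", time_fit, time_pred, roc_auc_score(y_test, y_hat)))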
def test_classification():
    X, y = make_hastie_10_2(n_samples=10000)
    y[y < 0] = 0
    n, m = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.5)
    models = {"palofrst_org": PaloForest(distribution="bernoulli",
                                         n_estimators=10,
                                         learning_rate=1.0,
                                         max_depth=5,
                                         subsample0=0.5,
                                         calibrate=False),
              "palofrst_clb": PaloForest(distribution="bernoulli",
                                         n_estimators=10,
                                         learning_rate=1.0,
                                         max_depth=5,
                                         subsample0=0.5,
                                         calibrate=True)}
    print("\n")
    print("# Test Classification")
    print("-----------------------------------------------------")
    print(" model_name train_time auc brier ")
def test_cls(self):
    np.random.seed(1)
    n_samples = 10000
    test_size = 0.2
    n_est = 10
    max_depth = 5
    lr = 0.1
    X, y = make_hastie_11_2(n_samples)  # project-local dataset helper
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=test_size)
    model_org = PaloForest(distribution="bernoulli",
                           n_estimators=n_est,
                           learning_rate=lr,
                           max_depth=max_depth,
                           calibrate=False)
    model_clb = PaloForest(distribution="bernoulli",
                           n_estimators=n_est,
                           learning_rate=lr,
                           max_depth=max_depth,
                           calibrate=True)
    model_org.fit(X_train, y_train)
    y_hat = model_org.predict_proba(X_test)[:, 1]
    auc_org = roc_auc_score(y_test, y_hat)
    brier_org = brier_score_loss(y_test, y_hat)
    model_clb.fit(X_train, y_train)
    y_hat = model_clb.predict_proba(X_test)[:, 1]
    auc_clb = roc_auc_score(y_test, y_hat)
    brier_clb = brier_score_loss(y_test, y_hat)
    self.assertTrue(auc_org > 0.5)
def test_classification():
    X, y = make_hastie_10_2(n_samples=10000)
    y[y < 0] = 0
    n, m = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.5)
    models = {"palobst": PaloBoost(distribution="bernoulli",
                                   n_estimators=10,
                                   learning_rate=1.0,
                                   max_depth=4,
                                   subsample=0.5),
              "palofrst": PaloForest(distribution="bernoulli",
                                     n_estimators=10,
                                     learning_rate=1.0,
                                     max_depth=4,
                                     subsample0=0.5),
              "gbm": GBM(distribution="bernoulli",
                         n_estimators=10,
                         learning_rate=1.0,
                         max_depth=4,
                         subsample=0.5),
              "sklearn": GradientBoostingClassifier(n_estimators=10,
                                                    learning_rate=1.0,
                                                    max_depth=4,
                                                    subsample=0.5)}
    print("\n")
def test_rgs(self):
    np.random.seed(1)
    n_samples = 10000
    test_size = 0.2
    n_est = 100
    max_depth = 7
    lr = 0.1
    X, y = make_friedman1_poly(n_samples=n_samples)  # project-local dataset helper
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=test_size)
    model_palo = PaloBoost(distribution="gaussian",
                           n_estimators=n_est,
                           learning_rate=lr,
                           max_depth=max_depth)
    model_sklr = GradientBoostingRegressor(n_estimators=n_est,
                                           learning_rate=lr,
                                           max_depth=max_depth)
    model_palo.fit(X_train, y_train)
    y_hat = model_palo.predict(X_test)
    rmse_palo = np.sqrt(np.mean((y_test - y_hat) ** 2))
    model_sklr.fit(X_train, y_train)
    y_hat = model_sklr.predict(X_test)
    rmse_sklr = np.sqrt(np.mean((y_test - y_hat) ** 2))
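    # The snippet is cut off after computing the two RMSEs; a unittest-style
    # comparison is the likely conclusion. The slack factor below is an
    # assumption, not the project's actual threshold:
    self.assertTrue(rmse_palo > 0.0)
    self.assertTrue(rmse_palo < rmse_sklr * 1.1)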