# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_advanced_feature_transformations_explain_local():
    """Execute the advanced-feature-transformations notebook and show its scraps.

    Runs the input notebook with papermill, then inspects the *executed*
    output copy with scrapbook.
    """
    notebookname = "advanced-feature-transformations-explain-local"
    input_notebook = "notebooks/" + notebookname + ".ipynb"
    output_notebook = "./test/" + notebookname + ".output.ipynb"
    pm.execute_notebook(input_notebook, output_notebook)
    # Bug fix: read the executed output notebook, not the unexecuted input --
    # papermill records scraps only in the output copy it produced.
    nb = sb.read_notebook(output_notebook)
    print(nb.scraps)  # dict of all scraps by name (was a dead bare expression)
def test_gensen_aml_deep_dive(notebooks):
    """Run the GenSen AzureML deep-dive notebook and sanity-check its scraps.

    Executes one epoch with a single hyperdrive run, then asserts the
    recorded validation loss and learning rate land in the expected range.
    """
    run_params = {
        "CACHE_DIR": "./tests/integration/temp",
        "AZUREML_CONFIG_PATH": "./tests/integration/.azureml",
        "UTIL_NLP_PATH": "./utils_nlp",
        "MAX_EPOCH": 1,
        "TRAIN_SCRIPT": "./examples/sentence_similarity/gensen_train.py",
        "CONFIG_PATH": "./examples/sentence_similarity/gensen_config.json",
        "MAX_TOTAL_RUNS": 1,
        "MAX_CONCURRENT_RUNS": 1,
    }
    pm.execute_notebook(
        notebooks["gensen_aml_deep_dive"], OUTPUT_NOTEBOOK, parameters=run_params
    )
    scraps = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert scraps["min_val_loss"] > 5
    assert scraps["learning_rate"] >= 0.0001
def test_deep_and_unified_understanding(notebooks):
    """Execute the deep-and-unified-understanding notebook and verify sigmas.

    Compares the scraped sigma vectors against reference values within a
    loose tolerance (3 decimals for numbers, 1 for BERT).
    """
    pm.execute_notebook(
        notebooks["deep_and_unified_understanding"],
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME,
    )
    scraps = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    expected_sigma_numbers = [
        0.00317593, 0.00172284, 0.00634005, 0.00164305, 0.00317159,
    ]
    expected_sigma_bert = [
        0.1735696, 0.14028822, 0.14590865, 0.2263149, 0.20640415,
        0.21249843, 0.18685372, 0.14112663, 0.25824168, 0.22399105,
        0.2393731, 0.12868434, 0.27386534, 0.35876372,
    ]
    np.testing.assert_array_almost_equal(
        scraps["sigma_numbers"], expected_sigma_numbers, decimal=3
    )
    np.testing.assert_array_almost_equal(
        scraps["sigma_bert"], expected_sigma_bert, decimal=1
    )
def test_03_notebook_run(classification_notebooks):
    """Run the accuracy-vs-speed notebook and check the recorded accuracies."""
    nb_path = classification_notebooks["03_training_accuracy_vs_speed"]
    pm.execute_notebook(
        nb_path,
        OUTPUT_NOTEBOOK,
        parameters={"PM_VERSION": pm.__version__},
        kernel_name=KERNEL_NAME,
    )
    executed = sb.read_notebook(OUTPUT_NOTEBOOK)
    train_accs = executed.scraps["training_accuracies"].data
    # 12 epochs of training, ending above 70% on both train and validation.
    assert len(train_accs) == 12
    assert train_accs[-1] > 0.70
    assert executed.scraps["validation_accuracy"].data > 0.70
def test_od_20_notebook_run(
    detection_notebooks,
    subscription_id,
    resource_group,
    workspace_name,
    workspace_region,
):
    """Smoke-run the Kubernetes deployment notebook with AzureML workspace params.

    Passes only if the notebook executes end-to-end without raising; no
    scraps are checked.
    """
    workspace_params = {
        "PM_VERSION": pm.__version__,
        "subscription_id": subscription_id,
        "resource_group": resource_group,
        "workspace_name": workspace_name,
        "workspace_region": workspace_region,
    }
    pm.execute_notebook(
        detection_notebooks["20_deployment_on_kubernetes"],
        OUTPUT_NOTEBOOK,
        parameters=workspace_params,
        kernel_name=KERNEL_NAME,
    )
def test_msrpc_runs(notebooks):
    """Smoke test: the MSRPC notebook executes without raising."""
    pm.execute_notebook(
        notebooks["msrpc"], OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME
    )
def test_02_notebook_run(classification_notebooks, multilabel_ic_data_path):
    """Run the multilabel classification notebook for a single epoch.

    Uses a tiny image size (50px) to keep the run fast, then checks that
    exactly one epoch of training accuracy was scraped.
    """
    nb_path = classification_notebooks["02_multilabel_classification"]
    run_params = {
        "PM_VERSION": pm.__version__,
        "DATA_PATH": multilabel_ic_data_path,
        "EPOCHS": 1,
        "IM_SIZE": 50,
    }
    pm.execute_notebook(
        nb_path, OUTPUT_NOTEBOOK, parameters=run_params, kernel_name=KERNEL_NAME
    )
    executed = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert len(executed.scraps["training_accuracies"].data) == 1
def test_12_notebook_run(classification_notebooks, tiny_ic_data_path):
    """Run the hard-negative-sampling notebook with one head and one body epoch.

    Small image size (50px) keeps the run quick; asserts a single training
    accuracy was recorded.
    """
    nb_path = classification_notebooks["12_hard_negative_sampling"]
    run_params = {
        "PM_VERSION": pm.__version__,
        "DATA_PATH": tiny_ic_data_path,
        "EPOCHS_HEAD": 1,
        "EPOCHS_BODY": 1,
        "IM_SIZE": 50,
    }
    pm.execute_notebook(
        nb_path, OUTPUT_NOTEBOOK, parameters=run_params, kernel_name=KERNEL_NAME
    )
    executed = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert len(executed.scraps["train_acc"].data) == 1
def test_bidaf_quickstart(
    notebooks, subscription_id, resource_group, workspace_name, workspace_region
):
    """Run the BiDAF quickstart notebook and check the scraped answer.

    Deploys the model to an ACI webservice using the supplied AzureML
    workspace parameters, then verifies the notebook's answer scrap.
    """
    deploy_params = {
        "config_path": None,
        "subscription_id": subscription_id,
        "resource_group": resource_group,
        "workspace_name": workspace_name,
        "workspace_region": workspace_region,
        "webservice_name": "aci-test-service",
    }
    pm.execute_notebook(
        notebooks["bidaf_quickstart"], OUTPUT_NOTEBOOK, parameters=deploy_params
    )
    answer = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict["answer"]
    assert answer == "Bi-Directional Attention Flow"
"""Handles papermill execution for notebook"""
import papermill
from src.mtool.display import display_error
local_copy = get_outputname(notebook, outfile_root)
if (notebook._hasParameters):
injects = pull_params(current_scene_db, notebook._environmentVars)
try:
papermill.execute_notebook(notebook.getpath(), local_copy, injects)
except Exception as e:
raise e
else:
display_error.no_tagged_cell_warning()
try:
papermill.execute_notebook(notebook.getpath(), local_copy)
except Exception as e:
raise e
#need local output -- temp? or just send it directly to HDFS
return local_copy