Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
@pytest.mark.skipif(not opt.has_numpy, reason="requires numpy")
def test_conf_scope_handles_numpy_bools():
    """A numpy ``bool_`` assigned inside a ConfigScope must survive
    evaluation and still be truthy in the resulting config."""

    @ConfigScope
    def conf_scope():
        a = opt.np.bool_(1)

    cfg = conf_scope()
    assert "a" in cfg
    assert cfg["a"]
# NOTE(review): fragment — the enclosing `def test_...` header for these
# assertions is not present in this chunk (it appears to have been spliced
# out), and the indentation of the whole chunk has been flattened.
# The assertions check the result of a source/dependency discovery call:
# `deps` holds package dependencies, `main` the entry source file, and
# `sources` every discovered source file under TEST_DIRECTORY.
assert isinstance(deps, set)
assert main == Source.create(os.path.join(TEST_DIRECTORY, "dependency_example.py"))
expected_sources = {
Source.create(os.path.join(TEST_DIRECTORY, "__init__.py")),
Source.create(os.path.join(TEST_DIRECTORY, "dependency_example.py")),
Source.create(os.path.join(TEST_DIRECTORY, "foo", "__init__.py")),
Source.create(os.path.join(TEST_DIRECTORY, "foo", "bar.py")),
}
assert sources == expected_sources
assert PackageDependency.create(pytest) in deps
assert PackageDependency.create(mock) in deps
# If numpy is installed on the test system it will automatically be added
# as an additional dependency, so test for that:
if opt.has_numpy:
assert PackageDependency.create(opt.np) in deps
assert len(deps) == 3
else:
assert len(deps) == 2
@pytest.mark.skipif(not opt.has_numpy, reason="requires numpy")
def test_conf_scope_handles_numpy_bools():
    """A numpy ``bool_`` stored in a ConfigDict must survive evaluation
    and still be truthy in the evaluated config."""
    cfg = ConfigDict({"a": opt.np.bool_(1)})
    assert "a" in cfg()
    assert cfg()["a"]
def test_serialize_numpy_arrays():
    """flatten/restore must round-trip a float32 array, preserving the
    values, the dtype and the shape."""
    original = opt.np.array([[1, 2, 3], [4, 5, 6]], dtype=opt.np.float32)
    round_tripped = restore(flatten(original))
    assert opt.np.all(round_tripped == original)
    assert round_tripped.dtype == original.dtype
    assert round_tripped.shape == original.shape
# Canonical simplification of types: every observed type is mapped to the
# basic Python type it should be compared as, so a change between two
# types with the same target (e.g. tuple -> list) is not reported as a
# typechange.
SIMPLIFY_TYPE = {
    type(None): type(None),
    bool: bool,
    float: float,
    int: int,
    str: str,
    list: list,
    tuple: list,
    dict: dict,
    DogmaticDict: dict,
    DogmaticList: list,
}
# if numpy is available we also want to ignore typechanges from numpy
# datatypes to the corresponding python datatype
# NOTE: the NP_INTS list in this chunk was truncated mid-literal and fused
# with a duplicate fragment of SIMPLIFY_TYPE; it is reconstructed here to
# match the intact copy of this block further down in the file.
if opt.has_numpy:
    from sacred.optional import np

    # Only register names that exist in this numpy build (e.g. float128
    # is platform-dependent).
    NP_FLOATS = ["float", "float16", "float32", "float64", "float128"]
    for npf in NP_FLOATS:
        if hasattr(np, npf):
            SIMPLIFY_TYPE[getattr(np, npf)] = float

    NP_INTS = [
        "int",
        "int8",
        "int16",
        "int32",
        "int64",
        "uint",
        "uint8",
        "uint16",
        "uint32",
        "uint64",
    ]
    for npi in NP_INTS:
        if hasattr(np, npi):
            SIMPLIFY_TYPE[getattr(np, npi)] = int

    SIMPLIFY_TYPE[np.bool_] = bool
# if in python 2 we want to ignore unicode/str and int/long typechanges
# (under Python 3 `unicode` and `long` do not exist, so the NameError is
# swallowed and nothing is registered)
try:
    SIMPLIFY_TYPE[unicode] = str
    SIMPLIFY_TYPE[long] = int
except NameError:
    pass
# if numpy is available we also want to ignore typechanges from numpy
# datatypes to the corresponding python datatype
if opt.has_numpy:
    from sacred.optional import np

    # Guard with hasattr: some dtypes (e.g. float128) are platform-dependent.
    NP_FLOATS = ['float', 'float16', 'float32', 'float64', 'float128']
    for npf in NP_FLOATS:
        if hasattr(np, npf):
            SIMPLIFY_TYPE[getattr(np, npf)] = float

    NP_INTS = ['int', 'int8', 'int16', 'int32', 'int64',
               'uint', 'uint8', 'uint16', 'uint32', 'uint64']
    for npi in NP_INTS:
        if hasattr(np, npi):
            SIMPLIFY_TYPE[getattr(np, npi)] = int

    SIMPLIFY_TYPE[np.bool_] = bool
# NOTE(review): orphaned function header — the body of `type_changed` is
# missing from this chunk (the next line starts a new def); confirm
# against the full module before relying on this view.
def type_changed(old_type, new_type):
def normalize_numpy(obj):
    """Return the plain Python value for a numpy scalar.

    Anything that is not a numpy scalar — or a numpy value whose
    ``.item()`` conversion raises ``ValueError`` — is returned unchanged.
    """
    if not (opt.has_numpy and isinstance(obj, opt.np.generic)):
        return obj
    try:
        return obj.item()
    except ValueError:
        return obj
# --- serializer module header ---
# jsonpickle provides the (de)serialization backend; the bare name `json`
# is re-bound to the jsonpickle module just below.
import jsonpickle
import json as _json
from sacred import optional as opt
json = jsonpickle
__all__ = ("flatten", "restore")
# Register optional-dependency handlers so numpy arrays and pandas objects
# round-trip through jsonpickle when those packages are installed.
if opt.has_numpy:
import jsonpickle.ext.numpy as jsonpickle_numpy
np = opt.np
jsonpickle_numpy.register_handlers()
if opt.has_pandas:
import jsonpickle.ext.pandas as jsonpickle_pandas
jsonpickle_pandas.register_handlers()
# Encoder options: deterministic (sorted keys), human-readable output for
# whichever JSON backend jsonpickle ends up using.
jsonpickle.set_encoder_options("simplejson", sort_keys=True, indent=4)
jsonpickle.set_encoder_options("demjson", compactly=False)
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import sacred.optional as opt
# Public API of this module: only `whet` is exported.
__all__ = ['whet']
# Export the real ingredient when whetlab is available; otherwise a mock
# object that reports the missing dependency when used.
if opt.has_whetlab:
from sacred.ingredients.whetlab import whet
else:
whet = opt.MissingDependencyMock('whetlab')