# These tests assume pytest, plus ConfigDict and normalize_or_die from
# sacred's config modules, are imported in the surrounding test file.
def test_conf_scope_handles_numpy_bools():
    cfg = ConfigDict({"a": opt.np.bool_(1)})
    assert "a" in cfg()
    assert cfg()["a"]


# The parametrize decorator is assumed; the original dtype list is not part
# of this excerpt, so a few representative numpy type names are used here.
@pytest.mark.parametrize("typename", ["float16", "float32", "int32", "int64"])
def test_normalize_or_die_for_numpy_datatypes(typename):
    dtype = getattr(opt.np, typename)
    assert normalize_or_die(dtype(7.0))
import datetime as dt
import json
import os
from io import BufferedReader, FileIO
from hashfs import HashFS
from tinydb import TinyDB
from tinydb_serialization import Serializer, SerializationMiddleware
import sacred.optional as opt
# Set data type values for abstract properties in Serializers
series_type = opt.pandas.Series if opt.has_pandas else None
dataframe_type = opt.pandas.DataFrame if opt.has_pandas else None
ndarray_type = opt.np.ndarray if opt.has_numpy else None
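
# As an illustration of how these serializer pieces fit together, here is a
# hedged sketch of a tinydb_serialization Serializer for datetime objects
# (the exact format string and the 'TinyDate' tag are assumptions):
class DateTimeSerializer(Serializer):
    OBJ_CLASS = dt.datetime  # the class this serializer handles

    def encode(self, obj):
        # Store datetimes as ISO-like strings with microsecond precision.
        return obj.strftime("%Y-%m-%dT%H:%M:%S.%f")

    def decode(self, s):
        return dt.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%f")

# Usage sketch: register it with the middleware and hand that to TinyDB.
#     middleware = SerializationMiddleware()
#     middleware.register_serializer(DateTimeSerializer(), 'TinyDate')
#     db = TinyDB('metadata.json', storage=middleware)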
class BufferedReaderWrapper(BufferedReader):
    """Custom wrapper that allows copying of a file handle.

    tinydb_serialization currently does a deepcopy on all the content of
    the dictionary before serialization. By default, file handles are not
    copyable, so this wrapper is necessary to create a duplicate of the
    file handle passed in.

    Note that the file passed in will therefore remain open, as it is the
    copy that gets closed.
    """

    def __init__(self, f_obj):
        # Re-open the file by name so this wrapper owns a fresh handle.
        f_obj = FileIO(f_obj.name)
        super().__init__(f_obj)
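    # The copy hooks below are a minimal sketch of how such a wrapper can
    # support copy/deepcopy: re-open the file by name and wrap the new
    # handle (name and mode are delegated to the underlying FileIO).
    def __copy__(self):
        f = open(self.name, self.mode)
        return BufferedReaderWrapper(f)

    def __deepcopy__(self, memo):
        f = open(self.name, self.mode)
        return BufferedReaderWrapper(f)


# SIMPLIFY_TYPE is used below but not defined in this excerpt. A minimal
# sketch of the mapping is assumed here: every type maps to the simplified
# type used for comparisons, with plain Python types mapping to themselves.
SIMPLIFY_TYPE = {
    type(None): type(None),
    bool: bool,
    int: int,
    float: float,
    str: str,
    list: list,
    tuple: list,
    dict: dict,
}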
# In Python 2 we want to ignore unicode/str and int/long type changes
try:
    SIMPLIFY_TYPE[unicode] = str
    SIMPLIFY_TYPE[long] = int
except NameError:  # Python 3: unicode and long no longer exist
    pass
# If numpy is available we also want to ignore type changes from numpy
# datatypes to the corresponding Python datatype.
if opt.has_numpy:
    from sacred.optional import np

    NP_FLOATS = ['float', 'float16', 'float32', 'float64', 'float128']
    for npf in NP_FLOATS:
        if hasattr(np, npf):
            SIMPLIFY_TYPE[getattr(np, npf)] = float

    NP_INTS = ['int', 'int8', 'int16', 'int32', 'int64',
               'uint', 'uint8', 'uint16', 'uint32', 'uint64']
    for npi in NP_INTS:
        if hasattr(np, npi):
            SIMPLIFY_TYPE[getattr(np, npi)] = int

    SIMPLIFY_TYPE[np.bool_] = bool


def type_changed(old_value, new_value):
    """Check whether the simplified types of two values differ."""
    return SIMPLIFY_TYPE[type(old_value)] != SIMPLIFY_TYPE[type(new_value)]
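# For example (assuming numpy is available), a numpy scalar replacing a
# plain Python number does not count as a type change:
#
#     type_changed(1, np.int32(2))   # -> False (both simplify to int)
#     type_changed(1.0, "one")       # -> True  (float vs. str)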
def force_bson_encodeable(obj):
    import bson
    if isinstance(obj, dict):
        try:
            bson.BSON.encode(obj, check_keys=True)
            return obj
        except bson.InvalidDocument:
            return {force_valid_bson_key(k): force_bson_encodeable(v)
                    for k, v in obj.items()}
    elif opt.has_numpy and isinstance(obj, opt.np.ndarray):
        return obj
    else:
        try:
            bson.BSON.encode({"dict_just_for_testing": obj})
            return obj
        except bson.InvalidDocument:
            return str(obj)
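
# force_valid_bson_key is referenced above but not defined in this excerpt.
# A minimal sketch under BSON's documented key rules (keys must be strings,
# must not start with '$' and must not contain '.'); the replacement
# characters are an assumption:
def force_valid_bson_key(key):
    key = str(key)
    if key.startswith("$"):
        key = "@" + key[1:]
    return key.replace(".", ",")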
import jsonpickle
import json as _json
from sacred import optional as opt

# jsonpickle serves as the encoder so arbitrary Python objects (including
# numpy arrays and pandas structures) can be round-tripped through JSON.
json = jsonpickle

__all__ = ("flatten", "restore")

if opt.has_numpy:
    import jsonpickle.ext.numpy as jsonpickle_numpy
    np = opt.np
    jsonpickle_numpy.register_handlers()

if opt.has_pandas:
    import jsonpickle.ext.pandas as jsonpickle_pandas
    jsonpickle_pandas.register_handlers()

jsonpickle.set_encoder_options("simplejson", sort_keys=True, indent=4)
jsonpickle.set_encoder_options("demjson", compactly=False)


def flatten(obj):
    return _json.loads(json.encode(obj, keys=True))
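
# __all__ above also exports `restore`, which is missing from this excerpt.
# A sketch of the inverse of flatten, assuming the same jsonpickle setup:
def restore(flat):
    return json.decode(_json.dumps(flat), keys=True)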
def is_different(old_value, new_value):
    """Numpy-aware comparison between two values."""
    if opt.has_numpy:
        return not opt.np.array_equal(old_value, new_value)
    else:
        return old_value != new_value
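# Note that np.array_equal also accepts plain scalars and nested lists, so
# the numpy branch works for non-array values as well, e.g.:
#
#     is_different([1, 2], opt.np.array([1, 2]))  # -> False
#     is_different(3, 4)                          # -> True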