Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _make_layer(self, *args, inputs=None, base_block=BaseConvBlock, **kwargs):
    """ Build a layer from positional arguments.

    Each element of `args` is either a dict of block parameters or a
    ready-made ``nn.Module``; they are applied to `inputs` in order and
    chained into an ``nn.Sequential``. With no positional arguments, a
    single `base_block` is created from `kwargs` alone.

    Parameters
    ----------
    args : sequence of dict or nn.Module
        Specifications (or instances) of the blocks to chain.
    inputs
        Example tensor used to infer shapes while instantiating blocks.
    base_block : callable
        Default block class, used when a dict does not name its own
        ``base_block``/``base``.
    kwargs : dict
        Common parameters merged into every dict-specified block.

    Returns
    -------
    nn.Module
        ``nn.Sequential`` of the created blocks, or a single `base_block`.

    Raises
    ------
    ValueError
        If a positional argument is neither a dict nor an ``nn.Module``.
    """
    if args:
        layers = []
        for item in args:
            if isinstance(item, dict):
                # Fix: work on a shallow copy so `pop` below does not
                # mutate the caller's dict (reusing the same spec dict
                # across calls previously lost its 'base_block'/'base' keys).
                item = dict(item)
                block = item.pop('base_block', None) or item.pop('base', None) or base_block
                # Item-specific values override the shared kwargs.
                block_args = {'inputs': inputs, **dict(Config(kwargs) + Config(item))}
                layer = block(**block_args)
                inputs = layer(inputs)
                layers.append(layer)
            elif isinstance(item, nn.Module):
                # Ready-made module: just pass the example tensor through it.
                inputs = item(inputs)
                layers.append(item)
            else:
                raise ValueError('Positional arguments of ConvBlock must be either dicts or nn.Modules, '
                                 'got instead {}'.format(type(item)))
        return nn.Sequential(*layers)
    # one block only
    return base_block(inputs=inputs, **kwargs)
These defaults can be changed in :meth:`~.TorchModel.build_config` or when calling :meth:`.Pipeline.init_model`.
Examples
--------
.. code-block:: python
@classmethod
def default_config(cls):
config = TorchModel.default_config()
config['initial_block'] = dict(layout='cnap', filters=16, kernel_size=7, strides=2,
pool_size=3, pool_strides=2)
config['body/filters'] = 32
config['head'] = dict(layout='cnadV', dropout_rate=.2)
return config
"""
# NOTE(review): fragment of a ``default_config`` body — the enclosing ``def``
# and the trailing ``return config`` are outside this chunk.
config = Config()
# Input handling.
config['inputs'] = {}
config['placeholder_batch_size'] = 2  # presumably the batch size used for shape tracing — confirm
# Device / execution settings.
config['device'] = None
config['benchmark'] = True
config['microbatch'] = None
config['sync_frequency'] = 1
config['train_steps'] = None
# Optimization settings.
config['loss'] = None
config['optimizer'] = 'Adam'
config['decay'] = None
# Network parts: assembly order and empty per-part configs.
config['order'] = ['initial_block', 'body', 'head']
config['initial_block'] = {}
config['body'] = {}
def __init__(self, config=None):
    """ Store two independent copies of the configuration and reset all
    bookkeeping attributes to their initial (empty) state.
    """
    self.full_config = Config(config)
    self.config = Config(config)
    self.train_lock = threading.Lock()

    # Attributes that get filled in later start out as None.
    for attribute in ('input_names', 'input_shapes', 'target_shape',
                      'classes', 'model', 'device', 'train_steps', 'microbatch'):
        setattr(self, attribute, None)

    self.devices = []
    self.sync_counter = 0
    self.iter_info = {}
def __init__(self, config=None):
    """ Initialize configuration copies and empty model/training state. """
    # Two separate Config instances so edits to one do not affect the other.
    initial_state = {
        'full_config': Config(config),
        'config': Config(config),
        'train_lock': threading.Lock(),
        'input_names': None,
        'input_shapes': None,
        'target_shape': None,
        'classes': None,
        'model': None,
        'device': None,
        'devices': [],
        'train_steps': None,
        'sync_counter': 0,
        'microbatch': None,
        'iter_info': {},
    }
    for name, value in initial_state.items():
        setattr(self, name, value)
def get_defaults(cls, name, kwargs):
    """ Fill block params from default config and kwargs.

    Looks up the defaults for block `name` in ``cls.default_config()``,
    overlays `kwargs` on top, and merges the result over the shared
    ``common`` settings.
    """
    defaults = cls.default_config()
    # Block-specific defaults, with explicit kwargs taking precedence.
    block_config = Config(defaults.get(name)) + (kwargs or {})
    # 'common' settings form the base; block values override them.
    merged = dict(defaults['common'])
    merged.update(block_config)
    return merged
These defaults can be changed in :meth:`~.TorchModel.build_config` or when calling :meth:`.Pipeline.init_model`.
Examples
--------
.. code-block:: python
@classmethod
def default_config(cls):
config = TorchModel.default_config()
config['initial_block'] = dict(layout='cnap', filters=16, kernel_size=7, strides=2,
pool_size=3, pool_strides=2)
config['body/filters'] = 32
config['head'] = dict(layout='cnadV', dropout_rate=.2)
return config
"""
# NOTE(review): fragment of a ``default_config`` body (duplicated earlier in
# this chunk) — the ``def`` header and ``return config`` are not visible here.
config = Config()
# Input handling.
config['inputs'] = {}
config['placeholder_batch_size'] = 2  # presumably used when tracing input shapes — confirm
# Device / execution settings.
config['device'] = None
config['benchmark'] = True
config['microbatch'] = None
config['sync_frequency'] = 1
config['train_steps'] = None
# Optimization settings.
config['loss'] = None
config['optimizer'] = 'Adam'
config['decay'] = None
# Network parts: assembly order and empty per-part configs.
config['order'] = ['initial_block', 'body', 'head']
config['initial_block'] = {}
config['body'] = {}
def config(self):
    """ Returns values.

    Collects the ``.value`` of the first and second element of every
    entry in ``self._config`` into a fresh ``Config``.
    """
    values = {}
    for entry in self._config:
        values[entry[0].value] = entry[1].value
    return Config(values)
These defaults can be changed in :meth:`~.TFModel.build_config` or when calling :meth:`.Pipeline.init_model`.
Examples
--------
.. code-block:: python
@classmethod
def default_config(cls):
config = TFModel.default_config()
config['initial_block'] = dict(layout='cnap', filters=16, kernel_size=7, strides=2,
pool_size=3, pool_strides=2)
config['body/filters'] = 32
config['head'] = dict(layout='cnadV', dropout_rate=.2)
return config
"""
# NOTE(review): body of a TF-model ``default_config`` — the ``def`` header
# is outside this chunk.
config = Config()
# Input specs and empty per-part configs.
config['inputs'] = {}
config['initial_block'] = {}
config['body'] = {}
config['head'] = {}
# Output handling.
config['predictions'] = None
config['output'] = None
# Optimizer / decay are (name, kwargs) pairs here, unlike the torch variant.
config['optimizer'] = ('Adam', dict())
config['decay'] = (None, dict())
config['scope'] = ''
# Settings shared by all layers (see get_defaults merging 'common').
config['common'] = {'batch_norm': {'momentum': .1}}
return config