# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def reset_layer_params(layer):
    """Re-initialize a layer's parameters in place.

    Weights (or the distribution means for a PLinear layer) are drawn
    from N(0, 1/sqrt(out_features)); biases from N(0, 0.1).

    :param layer: a torch.nn.Linear or PLinear layer; any other layer
        type is left untouched.
    """
    if isinstance(layer, torch.nn.Linear):
        torch.nn.init.normal_(layer.weight, mean=0., std=1 / math.sqrt(layer.out_features))
        # layers built with bias=False have bias=None; initializing it would raise
        if layer.bias is not None:
            torch.nn.init.normal_(layer.bias, mean=0., std=0.1)
    elif isinstance(layer, PLinear):
        torch.nn.init.normal_(layer.mean, mean=0., std=1 / math.sqrt(layer.out_features))
        if layer.bias is not None:
            torch.nn.init.normal_(layer.bias, mean=0., std=0.1)
def __init__(self, in_features, out_features, bias=True):
    """Probabilistic linear layer: the weight matrix is described by a
    learnable mean and sigma rather than a single point estimate.

    :param in_features: size of each input sample
    :param out_features: size of each output sample (number of neurons)
    :param bias: whether the layer carries a learnable additive bias
    """
    super(PLinear, self).__init__()
    self.in_features = in_features
    self.out_features = out_features

    # Learnable distribution parameters over the weight matrix —
    # these are what the optimizer updates.
    self.mean = Parameter(torch.Tensor(out_features, in_features))
    self.sigma = Parameter(torch.Tensor(out_features, in_features))

    # Several sampling distributions are possible; the discrete normal
    # sampler is wired in because it is by far the fastest.
    self.w_sampler = self.w_discrete_normal

    if not bias:
        self.register_parameter('bias', None)
    else:
        self.bias = Parameter(torch.Tensor(out_features))
def reset_layer_params(layer):
    """Re-initialize a layer's parameters in place.

    NOTE(review): this duplicates an identical definition earlier in the
    file; the later definition wins at import time — keep only one.

    :param layer: a torch.nn.Linear or PLinear layer; any other layer
        type is left untouched.
    """
    if isinstance(layer, torch.nn.Linear):
        torch.nn.init.normal_(layer.weight, mean=0., std=1 / math.sqrt(layer.out_features))
        # layers built with bias=False have bias=None; initializing it would raise
        if layer.bias is not None:
            torch.nn.init.normal_(layer.bias, mean=0., std=0.1)
    elif isinstance(layer, PLinear):
        torch.nn.init.normal_(layer.mean, mean=0., std=1 / math.sqrt(layer.out_features))
        if layer.bias is not None:
            torch.nn.init.normal_(layer.bias, mean=0., std=0.1)
shape = rombus(self.input_size, self.output_size, depth, self.input_size * 2)
elif large_input and large_output:
shape = rectangle(self.input_size, self.output_size, depth - 1)
else:
shape = funnel(self.input_size,self.output_size,depth)
'''
# NOTE(review): orphaned fragment — the enclosing `def` header (presumably
# DefaultNet.__init__) is not visible in this chunk, and an almost identical
# copy of this logic (minus the `fr`/Tanh toggle) appears again further down.
# Looks like a duplicated paste; confirm against the canonical version.
shape = [self.input_size, max([self.input_size*2,self.output_size*2,400]), self.output_size]
if pretrained_net is None:
logging.info(f'Building network of shape: {shape}')
rectifier = torch.nn.SELU #alternative: torch.nn.ReLU
layers = []
# `fr` starts False and is only ever reset to False below, so the Tanh
# branch looks unreachable as written — TODO confirm intent
fr = False
for ind in range(len(shape) - 1):
# probabilistic or deterministic linear layer, per global CONFIG flag
linear_function = PLinear if CONFIG.USE_PROBABILISTIC_LINEAR else torch.nn.Linear
layers.append(linear_function(shape[ind],shape[ind+1]))
# activations go between layers; none after the final linear layer
if ind < len(shape) - 2:
if fr:
fr = False
layers.append(torch.nn.Tanh())
else:
layers.append(rectifier())
self.net = torch.nn.Sequential(*layers)
else:
self.net = pretrained_net
# recover input/output sizes from the pretrained net's Linear layers;
# the last Linear visited determines output_size
for layer in self.net:
if isinstance(layer, torch.nn.Linear):
if self.input_size is None:
self.input_size = layer.in_features
self.output_size = layer.out_features
"""
Here we define the basic building blocks of our model,
in forward we define how we put it all together along with an input
"""
super(DefaultNet, self).__init__()
if shape is None and pretrained_net is None:
shape = [self.input_size, max([self.input_size*2,self.output_size*2,400]), self.output_size]
if pretrained_net is None:
logging.info(f'Building network of shape: {shape}')
rectifier = torch.nn.SELU #alternative: torch.nn.ReLU
layers = []
for ind in range(len(shape) - 1):
linear_function = PLinear if CONFIG.USE_PROBABILISTIC_LINEAR else torch.nn.Linear
layers.append(linear_function(shape[ind],shape[ind+1]))
if ind < len(shape) - 2:
layers.append(rectifier())
self.net = torch.nn.Sequential(*layers)
else:
self.net = pretrained_net
for layer in self.net:
if isinstance(layer, torch.nn.Linear):
if self.input_size is None:
self.input_size = layer.in_features
self.output_size = layer.out_features
if self.selfaware:
awareness_net_shape = [(self.input_size + self.output_size), max([int((self.input_size + self.output_size) * 1.5), 300]), self.nr_outputs]
awareness_layers = []