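# The layer classes below use numpy, theano and theano.tensor, plus a few
# helpers (relu_f, softplus_f, build_shared_zeros) and a BATCH_SIZE constant
# that are not part of this excerpt.  The definitions that follow are a
# minimal sketch of what those helpers plausibly look like so the snippet is
# runnable; the actual project code may differ.
import numpy
import theano
import theano.tensor as T

BATCH_SIZE = 100  # placeholder value; the real constant is defined elsewhere


def relu_f(vec):
    """ Elementwise rectified linear unit: max(0, x). """
    return (vec + abs(vec)) / 2.


def softplus_f(vec):
    """ Elementwise softplus: log(1 + exp(x)). """
    return T.log(1 + T.exp(vec))


def build_shared_zeros(shape, name):
    """ Theano shared variable filled with zeros of the given shape. """
    return theano.shared(value=numpy.zeros(shape, dtype=theano.config.floatX),
                         name=name, borrow=True)
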
class Linear(object):
    """ Basic linear transformation layer (W.X + b) """
    def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        if W is None:
            # Uniform weight initialization; the bounds here are the usual
            # sqrt(6 / (n_in + n_out)) Glorot range, which the "* 4 for
            # sigmoid" adjustment below assumes.
            W_values = numpy.asarray(rng.uniform(
                low=-numpy.sqrt(6. / (n_in + n_out)),
                high=numpy.sqrt(6. / (n_in + n_out)),
                size=(n_in, n_out)), dtype=theano.config.floatX)
            W_values *= 4  # This works for sigmoid activated networks!
            W = theano.shared(value=W_values, name='W', borrow=True)
        if b is None:
            b = build_shared_zeros((n_out,), 'b')
        self.input = input
        self.W = W
        self.b = b
        self.params = [self.W, self.b]
        self.output = T.dot(self.input, self.W) + self.b

    def __repr__(self):
        return "Linear"

class SigmoidLayer(Linear):
    """ Sigmoid activation layer (sigmoid(W.X + b)) """
    def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        super(SigmoidLayer, self).__init__(rng, input, n_in, n_out, W, b)
        self.pre_activation = self.output
        self.output = T.nnet.sigmoid(self.pre_activation)


class ReLU(Linear):
    """ Rectified Linear Unit activation layer (max(0, W.X + b)) """
    def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        if b is None:
            b = build_shared_zeros((n_out,), 'b')
        super(ReLU, self).__init__(rng, input, n_in, n_out, W, b)
        self.pre_activation = self.output
        self.output = relu_f(self.pre_activation)
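
# Illustrative usage only (not part of the original snippet): build a ReLU
# layer on a symbolic input matrix and evaluate it on random data.  Names
# such as _demo_relu and the layer sizes here are arbitrary.
def _demo_relu():
    rng = numpy.random.RandomState(1234)
    x = T.matrix('x')                       # symbolic mini-batch of inputs
    layer = ReLU(rng, input=x, n_in=784, n_out=256)
    f = theano.function([x], layer.output)  # compile the forward pass
    batch = numpy.asarray(rng.rand(32, 784), dtype=theano.config.floatX)
    return f(batch).shape                   # -> (32, 256)
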
class SoftPlus(Linear):
    """ Softplus activation layer (log(1 + exp(W.X + b))) """
    def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        if b is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        super(SoftPlus, self).__init__(rng, input, n_in, n_out, W, b)
        self.pre_activation = self.output
        self.output = softplus_f(self.pre_activation)

class BatchNormalizer(Linear):
    """ Linear layer applied to a per-mini-batch standardized input """
    def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        super(BatchNormalizer, self).__init__(rng, input, n_in, n_out, W, b)
        self.input = input
        # Center and scale each feature with the statistics of the current
        # mini-batch (the epsilon avoids division by zero), then apply the
        # affine transform to the normalized input.
        x_tmp = (self.input - T.mean(self.input, axis=0)) / (T.std(self.input, axis=0) + 1.E-6)
        self.output = T.dot(x_tmp, self.W) + self.b
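
# Illustrative only: the standardization step above, written out in plain
# numpy.  Each feature (column) of the mini-batch is centered and scaled with
# that same mini-batch's statistics before the affine transform is applied.
def _standardize_batch(x_batch, eps=1.e-6):
    return (x_batch - x_batch.mean(axis=0)) / (x_batch.std(axis=0) + eps)
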
class DatasetMiniBatchIterator(object):
    """ Basic mini-batch iterator """
    def __init__(self, x, y, batch_size=BATCH_SIZE, randomize=False):
        self.x = x
        self.y = y
        self.batch_size = batch_size
        self.randomize = randomize
        from sklearn.utils import check_random_state
        self.rng = check_random_state(42)  # fixed seed -> reproducible shuffling
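
# The class is truncated in this excerpt: its iteration method is not shown.
# Purely as a sketch of the kind of slicing such an iterator performs (not the
# original implementation), assuming x and y are equal-length indexable arrays:
def _iter_minibatches(x, y, batch_size, rng=None):
    indices = numpy.arange(len(x))
    if rng is not None:
        rng.shuffle(indices)  # shuffle sample order when randomizing
    for start in range(0, len(x), batch_size):
        batch_idx = indices[start:start + batch_size]
        yield x[batch_idx], y[batch_idx]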