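# Example: NNI annotation-style hyperparameter primitives. nni.choice and
# nni.function_choice pick one candidate value or expression per trial,
# nni.qlognormal samples a numeric value, and report_intermediate_result /
# report_final_result send metrics back to the tuner. Names such as foo, bar,
# avg_pool and max_poo are not defined in this fragment.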
import nni
def max_pool(k):
    pass

h_conv1 = 1
nni.choice({'foo': foo, 'bar': bar})(1)
conv_size = nni.choice({2: 2, 3: 3, 5: 5, 7: 7}, name='conv_size')
abc = nni.choice({'2': '2', 3: 3, '(5 * 6)': 5 * 6, 7: 7}, name='abc')
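# In the dict form of nni.function_choice, each key labels a candidate and each
# value is a zero-argument callable producing it; only one branch runs per trial.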
h_pool1 = nni.function_choice({
    'max_pool': lambda: max_pool(h_conv1),
    'h_conv1': lambda: h_conv1,
    'avg_pool': lambda: avg_pool(h_conv2, h_conv3)})
h_pool1 = nni.function_choice({
    'max_pool(h_conv1)': lambda: max_pool(h_conv1),
    'avg_pool(h_conv2, h_conv3)': lambda: avg_pool(h_conv2, h_conv3)},
    name='max_pool')
h_pool2 = nni.function_choice({
    'max_poo(h_conv1)': lambda: max_poo(h_conv1),
    '(2 * 3 + 4)': lambda: 2 * 3 + 4,
    '(lambda x: 1 + x)': lambda: lambda x: 1 + x},
    name='max_poo')
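# qlognormal declares a quantized log-normal hyperparameter; the arguments are
# assumed to follow the NNI search-space convention [mu, sigma, q].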
tmp = nni.qlognormal(1.2, 3, 4.5)
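# Metrics reported to NNI: intermediate results during the trial, one final result.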
test_acc = 1
nni.report_intermediate_result(test_acc)
test_acc = 2
nni.report_final_result(test_acc)
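
# Example: fragment of a model-building method that uses the dict form of
# nni.function_choice for the activation and pooling choices of a small CNN.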
x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])
with tf.name_scope('conv1'):
    W_conv1 = weight_variable(
        [self.conv_size, self.conv_size, 1, self.channel_1_num])
    b_conv1 = bias_variable([self.channel_1_num])
    h_conv1 = nni.function_choice({
        'tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)':
            lambda: tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1),
        'tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1)':
            lambda: tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1),
        'tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1)':
            lambda: tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1)},
        name='tf.nn.relu')
with tf.name_scope('pool1'):
    h_pool1 = nni.function_choice({
        'max_pool(h_conv1, self.pool_size)':
            lambda: max_pool(h_conv1, self.pool_size),
        'avg_pool(h_conv1, self.pool_size)':
            lambda: avg_pool(h_conv1, self.pool_size)},
        name='max_pool')
with tf.name_scope('conv2'):
    W_conv2 = weight_variable(
        [self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])
    b_conv2 = bias_variable([self.channel_2_num])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
with tf.name_scope('pool2'):
    h_pool2 = max_pool(h_conv2, self.pool_size)
last_dim = int(input_dim / (self.pool_size * self.pool_size))
with tf.name_scope('fc1'):
    W_fc1 = weight_variable(
        [last_dim * last_dim * self.channel_2_num, self.hidden_size])
    b_fc1 = bias_variable([self.hidden_size])
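
# Example: annotation primitives again; here the nni.choice candidates include
# nested dict and list values.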
import nni
def max_pool(k):
    pass

h_conv1 = 1
conv_size = nni.choice({2: 2, 3: 3, 5: 5, 7: 7}, name='conv_size')
abc = nni.choice({
    '2': '2', 3: 3, '(5 * 6)': 5 * 6,
    "{(1): 2, '3': 4}": {(1): 2, '3': 4},
    '[1, 2, 3]': [1, 2, 3]}, name='abc')
h_pool1 = nni.function_choice({
    'max_pool(h_conv1)': lambda: max_pool(h_conv1),
    'avg_pool(h_conv2, h_conv3)': lambda: avg_pool(h_conv2, h_conv3)},
    name='max_pool')
h_pool2 = nni.function_choice({
    'max_poo(h_conv1)': lambda: max_poo(h_conv1),
    '(2 * 3 + 4)': lambda: 2 * 3 + 4,
    '(lambda x: 1 + x)': lambda: lambda x: 1 + x},
    name='max_poo')
test_acc = 1
nni.report_intermediate_result(test_acc)
test_acc = 2
nni.report_final_result(test_acc)
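
# Example: the positional form of nni.function_choice, passing candidate lambdas
# directly (with and without an explicit name).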
try:
    input_dim = int(math.sqrt(self.x_dim))
except:
    print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
    logger.debug('input dim cannot be sqrt and reshape. input dim: %s',
                 str(self.x_dim))
    raise
x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
    w_conv1 = weight_variable(
        [self.conv_size, self.conv_size, 1, self.channel_1_num])
    b_conv1 = bias_variable([self.channel_1_num])
    h_conv1 = nni.function_choice(
        lambda: tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1),
        lambda: tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1),
        lambda: tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1)
    )  # example: without name
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
    h_pool1 = max_pool(h_conv1, self.pool_size)
    h_pool1 = nni.function_choice(
        lambda: max_pool(h_conv1, self.pool_size),
        lambda: avg_pool(h_conv1, self.pool_size),
        name='h_pool1')
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
    w_conv2 = weight_variable(
        [self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])
    b_conv2 = bias_variable([self.channel_2_num])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
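
# Example: the same network with named nni.function_choice calls and the input
# reshape wrapped in its own name_scope.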
with tf.name_scope('reshape'):
    try:
        input_dim = int(math.sqrt(self.x_dim))
    except:
        print('input dim cannot be sqrt and reshape. input dim: ' +
              str(self.x_dim))
        logger.debug(
            'input dim cannot be sqrt and reshape. input dim: %s',
            str(self.x_dim))
        raise
    x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
with tf.name_scope('conv1'):
    w_conv1 = weight_variable(
        [self.conv_size, self.conv_size, 1, self.channel_1_num])
    b_conv1 = bias_variable([self.channel_1_num])
    h_conv1 = nni.function_choice(
        lambda: tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1),
        lambda: tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1),
        lambda: tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1),
        name='tf.nn.relu')
with tf.name_scope('pool1'):
    h_pool1 = nni.function_choice(
        lambda: max_pool(h_conv1, self.pool_size),
        lambda: avg_pool(h_conv1, self.pool_size),
        name='max_pool')
with tf.name_scope('conv2'):
    w_conv2 = weight_variable(
        [self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])
    b_conv2 = bias_variable([self.channel_2_num])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
with tf.name_scope('pool2'):
    h_pool2 = max_pool(h_conv2, self.pool_size)
last_dim = int(input_dim / (self.pool_size * self.pool_size))
with tf.name_scope('fc1'):
    w_fc1 = weight_variable(
        [last_dim * last_dim * self.channel_2_num, self.hidden_size])
    b_fc1 = bias_variable([self.hidden_size])
    h_pool2_flat = tf.reshape(
        h_pool2, [-1, last_dim * last_dim * self.channel_2_num])