model.add_module(
"conv_3",
nn.Conv2d(
n_filters_2,
n_filters_3,
self.third_kernel_size,
stride=1,
padding=(self.third_kernel_size[0] // 2, 0),
bias=True,
),
)
model.add_module(
"bnorm_3",
nn.BatchNorm2d(n_filters_3, momentum=0.01, affine=True, eps=1e-3),
)
model.add_module("elu_3", Expression(elu))
model.add_module(
"pool_3", pool_class(kernel_size=(2, 4), stride=(2, 4))
)
model.add_module("drop_3", nn.Dropout(p=self.drop_prob))
out = model(
np_to_var(
np.ones(
(1, self.in_chans, self.input_time_length, 1),
dtype=np.float32,
)
)
)
n_out_virtual_chans = out.cpu().data.numpy().shape[2]
if self.final_conv_length == "auto":
n_out_time = out.cpu().data.numpy().shape[3]
self.final_conv_length = n_out_time
model.add_module(
"conv_classifier",
nn.Conv2d(
self.F2,
self.n_classes,
(n_out_virtual_chans, self.final_conv_length),
bias=True,
),
)
model.add_module("softmax", nn.LogSoftmax())
# Transpose back to the the logic of braindecode,
# so time in third dimension (axis=2)
model.add_module("permute_back", Expression(_transpose_1_0))
model.add_module("squeeze", Expression(_squeeze_final_output))
glorot_weight_zero_bias(model)
return model
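# Expression is used throughout these snippets but never defined in them; a
# minimal sketch, assuming braindecode's usual helper that wraps a plain
# function as an nn.Module so it can sit inside nn.Sequential:
class Expression(nn.Module):
    def __init__(self, expression_fn):
        super(Expression, self).__init__()
        self.expression_fn = expression_fn

    def forward(self, *x):
        # Apply the wrapped function to the input tensor(s)
        return self.expression_fn(*x)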
if self.batch_norm:
model.add_module(
"bnorm",
nn.BatchNorm2d(
n_filters_conv, momentum=self.batch_norm_alpha, affine=True
),
)
model.add_module("conv_nonlin", Expression(self.conv_nonlin))
model.add_module(
"pool",
pool_class(
kernel_size=(self.pool_time_length, 1),
stride=(self.pool_time_stride, 1),
),
)
model.add_module("pool_nonlin", Expression(self.pool_nonlin))
model.add_module("drop", nn.Dropout(p=self.drop_prob))
model.eval()
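# eval() disables dropout and batch-norm updates so the dummy forward pass
# below reflects inference-time output shapes.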
if self.final_conv_length == "auto":
out = model(
np_to_var(
np.ones(
(1, self.in_chans, self.input_time_length, 1),
dtype=np.float32,
)
)
)
n_out_time = out.cpu().data.numpy().shape[2]
self.final_conv_length = n_out_time
model.add_module(
"conv_classifier",
nn.Conv2d(
n_filters_3,
self.n_classes,
(n_out_virtual_chans, self.final_conv_length),
bias=True,
),
)
model.add_module("softmax", nn.LogSoftmax())
# Transpose back to the the logic of braindecode,
# so time in third dimension (axis=2)
model.add_module(
"permute_2", Expression(lambda x: x.permute(0, 1, 3, 2))
)
model.add_module("squeeze", Expression(_squeeze_final_output))
glorot_weight_zero_bias(model)
return model
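# _squeeze_final_output is referenced above but not defined in this snippet;
# a minimal sketch, assuming it removes the singleton axes left after the
# classifier convolution, yielding (batch, n_classes) or (batch, n_classes, time):
def _squeeze_final_output(x):
    assert x.size()[3] == 1
    x = x[:, :, :, 0]
    if x.size()[2] == 1:
        x = x[:, :, 0]
    return x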
if self.final_conv_length == "auto":
    out = model(
        np_to_var(
            np.ones(
                (1, self.in_chans, self.input_time_length, 1),
                dtype=np.float32,
            )
        )
    )
    n_out_time = out.cpu().data.numpy().shape[2]
    self.final_conv_length = n_out_time
model.add_module(
"conv_classifier",
nn.Conv2d(
self.n_filters_4,
self.n_classes,
(self.final_conv_length, 1),
bias=True,
),
)
model.add_module("softmax", nn.LogSoftmax(dim=1))
model.add_module("squeeze", Expression(_squeeze_final_output))
# Initialization: Xavier init is the same as in our paper...
# it was the default in Lasagne
init.xavier_uniform_(model.conv_time.weight, gain=1)
# maybe no bias in case of no split layer and batch norm
if self.split_first_layer or (not self.batch_norm):
init.constant_(model.conv_time.bias, 0)
if self.split_first_layer:
init.xavier_uniform_(model.conv_spat.weight, gain=1)
if not self.batch_norm:
init.constant_(model.conv_spat.bias, 0)
if self.batch_norm:
init.constant_(model.bnorm.weight, 1)
init.constant_(model.bnorm.bias, 0)
param_dict = dict(list(model.named_parameters()))
for block_nr in range(2, 5):
    # Per-block init: Xavier for conv weights, constants for bnorm parameters
    conv_weight = param_dict["conv_{:d}.weight".format(block_nr)]
    init.xavier_uniform_(conv_weight, gain=1)
    if not self.batch_norm:
        conv_bias = param_dict["conv_{:d}.bias".format(block_nr)]
        init.constant_(conv_bias, 0)
    else:
        bnorm_weight = param_dict["bnorm_{:d}.weight".format(block_nr)]
        bnorm_bias = param_dict["bnorm_{:d}.bias".format(block_nr)]
        init.constant_(bnorm_weight, 1)
        init.constant_(bnorm_bias, 0)
if self.batch_norm:
    model.add_module(
        "bnorm",
nn.BatchNorm2d(
n_filters_conv,
momentum=self.batch_norm_alpha,
affine=True,
eps=1e-5,
),
)
model.add_module("conv_nonlin", Expression(self.first_nonlin))
model.add_module(
"pool",
first_pool_class(
kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)
),
)
model.add_module("pool_nonlin", Expression(self.first_pool_nonlin))
def add_conv_pool_block(
model, n_filters_before, n_filters, filter_length, block_nr
):
suffix = "_{:d}".format(block_nr)
model.add_module("drop" + suffix, nn.Dropout(p=self.drop_prob))
model.add_module(
"conv" + suffix,
nn.Conv2d(
n_filters_before,
n_filters,
(filter_length, 1),
stride=(conv_stride, 1),
bias=not self.batch_norm,
),
)
if self.batch_norm:
model.add_module(
"bnorm" + suffix,
nn.BatchNorm2d(
n_filters,
momentum=self.batch_norm_alpha,
affine=True,
eps=1e-5,
),
)
model.add_module("nonlin" + suffix, Expression(self.later_nonlin))
model.add_module(
"pool" + suffix,
later_pool_class(
kernel_size=(self.pool_time_length, 1),
stride=(pool_stride, 1),
),
)
model.add_module(
"pool_nonlin" + suffix, Expression(self.later_pool_nonlin)
)
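# Hypothetical call sites for the helper above; the filter_length_* and
# n_filters_* attribute names are assumptions based on the naming used
# elsewhere in this file:
add_conv_pool_block(model, n_filters_conv, self.n_filters_2, self.filter_length_2, 2)
add_conv_pool_block(model, self.n_filters_2, self.n_filters_3, self.filter_length_3, 3)
add_conv_pool_block(model, self.n_filters_3, self.n_filters_4, self.filter_length_4, 4)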
def create_network(self):
if self.stride_before_pool:
conv_stride = self.pool_time_stride
pool_stride = 1
else:
conv_stride = 1
pool_stride = self.pool_time_stride
pool_class_dict = dict(max=nn.MaxPool2d, mean=AvgPool2dWithConv)
first_pool_class = pool_class_dict[self.first_pool_mode]
later_pool_class = pool_class_dict[self.later_pool_mode]
model = nn.Sequential()
if self.split_first_layer:
model.add_module("dimshuffle", Expression(_transpose_time_to_spat))
model.add_module(
"conv_time",
nn.Conv2d(
1,
self.n_filters_time,
(self.filter_time_length, 1),
stride=1,
),
)
model.add_module(
"conv_spat",
nn.Conv2d(
self.n_filters_time,
self.n_filters_spat,
(1, self.in_chans),
stride=(conv_stride, 1),
bias=not self.batch_norm,
),
)
if self.final_conv_length == "auto":
    out = model(
        np_to_var(
            np.ones(
                (1, self.in_chans, self.input_time_length, 1),
                dtype=np.float32,
            )
        )
    )
n_out_time = out.cpu().data.numpy().shape[2]
self.final_conv_length = n_out_time
model.add_module(
"conv_classifier",
nn.Conv2d(
n_filters_conv,
self.n_classes,
(self.final_conv_length, 1),
bias=True,
),
)
model.add_module("softmax", nn.LogSoftmax(dim=1))
model.add_module("squeeze", Expression(_squeeze_final_output))
# Initialization: Xavier init is the same as in the paper...
init.xavier_uniform_(model.conv_time.weight, gain=1)
# maybe no bias in case of no split layer and batch norm
if self.split_first_layer or (not self.batch_norm):
init.constant_(model.conv_time.bias, 0)
if self.split_first_layer:
init.xavier_uniform_(model.conv_spat.weight, gain=1)
if not self.batch_norm:
init.constant_(model.conv_spat.bias, 0)
if self.batch_norm:
init.constant_(model.bnorm.weight, 1)
init.constant_(model.bnorm.bias, 0)
init.xavier_uniform_(model.conv_classifier.weight, gain=1)
init.constant_(model.conv_classifier.bias, 0)
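# glorot_weight_zero_bias (called in the snippets above) is not defined here;
# a minimal sketch, assuming Xavier/Glorot init for weights, constant init
# for batch-norm scale, and zeroed biases:
def glorot_weight_zero_bias(model):
    for module in model.modules():
        if hasattr(module, "weight"):
            if "BatchNorm" not in module.__class__.__name__:
                init.xavier_uniform_(module.weight, gain=1)
            else:
                init.constant_(module.weight, 1)
        if hasattr(module, "bias") and module.bias is not None:
            init.constant_(module.bias, 0)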