How to use the autokeras.nn.layers.get_conv_class function in autokeras

To help you get started, we’ve selected a few autokeras examples, based on popular ways get_conv_class is used in public projects.

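get_conv_class itself is a small factory: given the number of spatial dimensions, it returns the matching stub convolution class, which the callers below instantiate directly. A minimal sketch, assuming the legacy autokeras 0.x API that these snippets use (the argument names are inferred from the calls shown below):

from autokeras.nn.layers import get_conv_class

conv_class = get_conv_class(2)                       # stub conv class for 2-D (image) data
conv = conv_class(3, 64, kernel_size=3, stride=1)    # (input_channel, filters, ...)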

github keras-team / autokeras / autokeras / nn / layer_transformer.py (View on GitHub)
def wider_pre_conv(layer, n_add_filters, weighted=True):
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel,
                                     layer.filters + n_add_filters,
                                     kernel_size=layer.kernel_size,
                                     stride=layer.stride)

    n_pre_filters = layer.filters
    rand = np.random.randint(n_pre_filters, size=n_add_filters)
    teacher_w, teacher_b = layer.get_weights()

    student_w = teacher_w.copy()
    student_b = teacher_b.copy()
    # Copy randomly chosen teacher filters (and their biases) to the new output channels.
    for i in range(len(rand)):
        teacher_index = rand[i]
        new_weight = teacher_w[teacher_index, ...]
        new_weight = new_weight[np.newaxis, ...]
        student_w = np.concatenate((student_w, new_weight), axis=0)
        student_b = np.append(student_b, teacher_b[teacher_index])
    new_pre_layer = get_conv_class(n_dim)(layer.input_channel,
                                          n_pre_filters + n_add_filters,
                                          kernel_size=layer.kernel_size,
                                          stride=layer.stride)
    new_pre_layer.set_weights((add_noise(student_w, teacher_w), add_noise(student_b, teacher_b)))
    return new_pre_layer
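The loop above grows the weight tensor along axis 0, the output-filter axis. A standalone sketch of that "wider" trick, with toy NumPy shapes chosen purely for illustration:

import numpy as np

# Toy conv weights shaped (filters, input_channel, k, k); the real shapes
# come from layer.get_weights() in wider_pre_conv above.
teacher_w = np.random.randn(4, 3, 3, 3)
teacher_b = np.random.randn(4)
rand = np.random.randint(4, size=2)        # pick 2 existing filters to duplicate

student_w = np.concatenate((teacher_w, teacher_w[rand, ...]), axis=0)
student_b = np.append(teacher_b, teacher_b[rand])
print(student_w.shape, student_b.shape)    # (6, 3, 3, 3) (6,)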
github keras-team / autokeras / autokeras / net_transformer.py (View on GitHub)
def create_new_layer(layer, n_dim):
    input_shape = layer.output.shape
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim), StubReLU]
    if is_layer(layer, LayerType.RELU):
        conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim)]
        dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
    elif is_layer(layer, LayerType.DROPOUT):
        dense_deeper_classes = [StubDense, StubReLU]
    elif is_layer(layer, LayerType.BATCH_NORM):
        conv_deeper_classes = [get_conv_class(n_dim), StubReLU]

    if len(input_shape) == 1:
        # It is in the dense layer part.
        layer_class = sample(dense_deeper_classes, 1)[0]
    else:
        # It is in the conv layer part.
        layer_class = sample(conv_deeper_classes, 1)[0]

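Note that `sample` here is presumably Python's random.sample (as imported in net_transformer.py): the function draws one candidate class uniformly, then instantiates it according to the layer's shape. A tiny illustration with stand-in values:

from random import sample

candidates = ['conv', 'batch_norm', 'relu']   # stand-ins for the stub classes above
layer_class = sample(candidates, 1)[0]        # one uniform draw, as in create_new_layer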
github keras-team / autokeras / autokeras / nn / graph.py (View on GitHub)
    def to_add_skip_model(self, start_id, end_id):
        """Add a weighted add skip-connection from after the start layer to after the end layer.

        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(('to_add_skip_model', start_id, end_id))
        filters_end = self.layer_list[end_id].output.shape[-1]
        filters_start = self.layer_list[start_id].output.shape[-1]
        start_node_id = self.layer_id_to_output_node_ids[start_id][0]

        pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]
        end_node_id = self.layer_id_to_output_node_ids[end_id][0]

        skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id)

        # Add the conv layer
        new_conv_layer = get_conv_class(self.n_dim)(filters_start,
                                                    filters_end,
                                                    1)
        skip_output_id = self.add_layer(new_conv_layer, skip_output_id)

        # Add the add layer.
        add_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))
        add_layer = StubAdd()

        self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id)
        self._add_edge(add_layer, add_input_node_id, end_node_id)
        self._add_edge(add_layer, skip_output_id, end_node_id)
        add_layer.input = [self.node_list[add_input_node_id], self.node_list[skip_output_id]]
        add_layer.output = self.node_list[end_node_id]
        self.node_list[end_node_id].shape = add_layer.output_shape

        # Set weights to the additional conv layer.
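The 1x1 convolution built with get_conv_class(self.n_dim)(filters_start, filters_end, 1) exists only to make the skip path's channel count match the destination before the StubAdd. An illustrative PyTorch sketch of that channel alignment (not autokeras's own API; the shapes are made up):

import torch
import torch.nn as nn

skip = torch.randn(1, 32, 8, 8)            # skip path: 32 channels
main = torch.randn(1, 64, 8, 8)            # destination: 64 channels
align = nn.Conv2d(32, 64, kernel_size=1)   # 1x1 conv, like new_conv_layer above
out = main + align(skip)                   # the StubAdd step
print(out.shape)                           # torch.Size([1, 64, 8, 8])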
github keras-team / autokeras / autokeras / nn / graph.py (View on GitHub)
def _insert_pooling_layer_chain(self, start_node_id, end_node_id):
        skip_output_id = start_node_id
        for layer in self._get_pooling_layers(start_node_id, end_node_id):
            new_layer = deepcopy(layer)
            if is_layer(new_layer, LayerType.CONV):
                filters = self.node_list[start_node_id].shape[-1]
                # Keep the original kernel only if the conv changes spatial size;
                # a "same"-padded, stride-1 conv can be replaced by a 1x1 kernel.
                if layer.padding != int(layer.kernel_size / 2) or layer.stride != 1:
                    kernel_size = layer.kernel_size
                else:
                    kernel_size = 1
                new_layer = get_conv_class(self.n_dim)(filters, filters, kernel_size, layer.stride,
                                                       padding=layer.padding)
                if self.weighted:
                    init_conv_weight(new_layer)
            else:
                new_layer = deepcopy(layer)
            skip_output_id = self.add_layer(new_layer, skip_output_id)
        skip_output_id = self.add_layer(StubReLU(), skip_output_id)
        return skip_output_id
github keras-team / autokeras / autokeras / nn / layer_transformer.py (View on GitHub)
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel + n_add,
                                     layer.filters,
                                     kernel_size=layer.kernel_size,
                                     stride=layer.stride)
    n_filters = layer.filters
    teacher_w, teacher_b = layer.get_weights()

    new_weight_shape = list(teacher_w.shape)
    new_weight_shape[1] = n_add
    new_weight = np.zeros(tuple(new_weight_shape))

    student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
                                add_noise(new_weight, teacher_w),
                                teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
    new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
                                      n_filters,
                                      kernel_size=layer.kernel_size,
                                      stride=layer.stride)
    new_layer.set_weights((student_w, teacher_b))
    return new_layer
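wider_next_conv is the counterpart to wider_pre_conv: when the previous layer gained n_add filters, this layer grows along axis 1 (the input-channel axis), splicing near-zero columns in at start_dim. A standalone NumPy sketch with illustrative shapes:

import numpy as np

teacher_w = np.random.randn(8, 4, 3, 3)    # (filters, input_channel, k, k)
n_add, start_dim, total_dim = 2, 4, 4
new_cols = np.zeros((8, n_add, 3, 3))      # placeholder columns for the new inputs
student_w = np.concatenate((teacher_w[:, :start_dim, ...],
                            new_cols,
                            teacher_w[:, start_dim:total_dim, ...]), axis=1)
print(student_w.shape)                     # (8, 6, 3, 3)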