How to use the nni.networkmorphism_tuner.layers.StubDense function in nni

To help you get started, we’ve selected a few nni examples that show how StubDense is used in public projects.


github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / layer_transformer.py View on Github external
def dense_to_deeper_block(dense_layer, weighted=True):
    '''Return a deeper dense block: a StubReLU followed by a square StubDense
    initialized as a (noisy) identity, so the network's function is
    approximately preserved.
    '''
    units = dense_layer.units
    weight = np.eye(units)
    bias = np.zeros(units)
    new_dense_layer = StubDense(units, units)
    if weighted:
        new_dense_layer.set_weights(
            (add_noise(weight, np.array([0, 1])),
             add_noise(bias, np.array([0, 1])))
        )
    return [StubReLU(), new_dense_layer]
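
StubDense(input_units, units) is nni's framework-agnostic placeholder for a fully connected layer; its weights are plain NumPy arrays attached with set_weights((weight, bias)). The sketch below is a minimal, hypothetical driver for the transformer above (the layer size and random weights are illustrative, not from the nni source):

import numpy as np
from nni.networkmorphism_tuner.layers import StubDense
from nni.networkmorphism_tuner.layer_transformer import dense_to_deeper_block

# A 64-unit placeholder layer with concrete (random) weights.
layer = StubDense(64, 64)
layer.set_weights((np.random.randn(64, 64), np.zeros(64)))

relu, deeper = dense_to_deeper_block(layer)
weight, bias = deeper.get_weights()
print(weight.shape, bias.shape)  # (64, 64) (64,) -- a noisy identity block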
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / graph_transformer.py View on Github external
def create_new_layer(layer, n_dim):
    '''Create a new layer to insert when deepening the graph.
    '''

    input_shape = layer.output.shape
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [
        get_conv_class(n_dim),
        get_batch_norm_class(n_dim),
        StubReLU]
    if is_layer(layer, "ReLU"):
        conv_deeper_classes = [
            get_conv_class(n_dim),
            get_batch_norm_class(n_dim)]
        dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
    elif is_layer(layer, "Dropout"):
        dense_deeper_classes = [StubDense, StubReLU]
    elif is_layer(layer, "BatchNormalization"):
        conv_deeper_classes = [get_conv_class(n_dim), StubReLU]

    layer_class = None
    if len(input_shape) == 1:
        # It is in the dense layer part.
        layer_class = sample(dense_deeper_classes, 1)[0]
    else:
        # It is in the conv layer part.
        layer_class = sample(conv_deeper_classes, 1)[0]

    if layer_class == StubDense:
        new_layer = StubDense(input_shape[0], input_shape[0])

    elif layer_class == get_dropout_class(n_dim):
        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)

    elif layer_class == get_conv_class(n_dim):
        new_layer = layer_class(
            input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1
        )

    elif layer_class == get_batch_norm_class(n_dim):
        new_layer = layer_class(input_shape[-1])

    elif layer_class == get_pooling_class(n_dim):
        new_layer = layer_class(sample((1, 3, 5), 1)[0])

    else:
        new_layer = layer_class()

    return new_layer
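
When the incoming shape is 1-D, the sampler draws from the dense candidates, and StubDense is built square so the layer width is unchanged. A minimal sketch of that branch in isolation (assuming nni is installed; sample is random.sample from the standard library, as in the excerpt):

from random import sample
from nni.networkmorphism_tuner.layers import StubDense, StubReLU, get_dropout_class

n_dim = 1
input_shape = (128,)  # 1-D shape, i.e. the dense part of the network
dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]

layer_class = sample(dense_deeper_classes, 1)[0]
if layer_class == StubDense:
    # Square layer: deepening must not change the width.
    new_layer = StubDense(input_shape[0], input_shape[0])
    print(new_layer.input_units, new_layer.units)  # 128 128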
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / layer_transformer.py View on Github external
def wider_pre_dense(layer, n_add, weighted=True):
    '''Widen the previous dense layer by `n_add` output units.
    '''
    if not weighted:
        return StubDense(layer.input_units, layer.units + n_add)

    n_units2 = layer.units

    teacher_w, teacher_b = layer.get_weights()
    rand = np.random.randint(n_units2, size=n_add)
    student_w = teacher_w.copy()
    student_b = teacher_b.copy()

    # Duplicate n_add randomly chosen output units, adding noise to break symmetry.
    for i in range(n_add):
        teacher_index = rand[i]
        new_weight = teacher_w[teacher_index, :]
        new_weight = new_weight[np.newaxis, :]
        student_w = np.concatenate(
            (student_w, add_noise(new_weight, student_w)), axis=0)
        student_b = np.append(
            student_b, add_noise(
                teacher_b[teacher_index], student_b))

    new_pre_layer = StubDense(layer.input_units, n_units2 + n_add)
    new_pre_layer.set_weights((student_w, student_b))

    return new_pre_layer
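
The widened layer can be sanity-checked directly; note that StubDense stores its weight matrix as (units, input_units). A minimal sketch with arbitrary sizes (assuming nni is installed):

import numpy as np
from nni.networkmorphism_tuner.layers import StubDense
from nni.networkmorphism_tuner.layer_transformer import wider_pre_dense

layer = StubDense(32, 16)
layer.set_weights((np.random.randn(16, 32), np.zeros(16)))

wider = wider_pre_dense(layer, n_add=8)
weight, bias = wider.get_weights()
print(wider.input_units, wider.units)  # 32 24
print(weight.shape, bias.shape)        # (24, 32) (24,)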
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / nn.py View on Github external
    def generate(self, model_len=None, model_width=None):
        if model_len is None:
            model_len = Constant.MODEL_LEN
        if model_width is None:
            model_width = Constant.MODEL_WIDTH
        if isinstance(model_width, list) and not len(model_width) == model_len:
            raise ValueError(
                "The length of 'model_width' does not match 'model_len'")
        elif isinstance(model_width, int):
            model_width = [model_width] * model_len

        graph = Graph(self.input_shape, False)
        output_node_id = 0
        n_nodes_prev_layer = self.input_shape[0]
        for width in model_width:
            output_node_id = graph.add_layer(
                StubDense(n_nodes_prev_layer, width), output_node_id
            )
            output_node_id = graph.add_layer(
                StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id
            )
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            n_nodes_prev_layer = width

        graph.add_layer(
            StubDense(
                n_nodes_prev_layer,
                self.n_output_node),
            output_node_id)
        return graph
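
This excerpt is MlpGenerator.generate from nn.py: it alternates StubDense, StubDropout1d, and StubReLU layers and finishes with a StubDense mapping to the output nodes. A minimal sketch of driving it (assuming nni is installed; the input shape and class count are arbitrary):

from nni.networkmorphism_tuner.nn import MlpGenerator

# A 10-class classifier over 784 flattened input features.
generator = MlpGenerator(10, (784,))
graph = generator.generate(model_len=3, model_width=64)
print(len(graph.layer_list))  # 3 * (Dense + Dropout + ReLU) + final Dense = 10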
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / layer_transformer.py View on Github external
def wider_next_dense(layer, start_dim, total_dim, n_add, weighted=True):
    '''Widen the next dense layer so it accepts `n_add` extra input units.
    '''
    if not weighted:
        return StubDense(layer.input_units + n_add, layer.units)
    teacher_w, teacher_b = layer.get_weights()
    student_w = teacher_w.copy()
    n_units_each_channel = int(teacher_w.shape[1] / total_dim)

    new_weight = np.zeros((teacher_w.shape[0], n_add * n_units_each_channel))
    student_w = np.concatenate(
        (
            student_w[:, : start_dim * n_units_each_channel],
            add_noise(new_weight, student_w),
            student_w[
                :, start_dim * n_units_each_channel: total_dim * n_units_each_channel
            ],
        ),
        axis=1,
    )

    new_layer = StubDense(layer.input_units + n_add, layer.units)
    new_layer.set_weights((student_w, teacher_b))
    return new_layer
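
As with wider_pre_dense, the result is easy to sanity-check. A minimal sketch (assuming nni is installed; start_dim and total_dim are chosen so the 8 new inputs land after the existing 16, purely for illustration):

import numpy as np
from nni.networkmorphism_tuner.layers import StubDense
from nni.networkmorphism_tuner.layer_transformer import wider_next_dense

layer = StubDense(16, 4)
layer.set_weights((np.random.randn(4, 16), np.zeros(4)))

# The preceding layer grew by 8 units, so this layer needs 8 extra inputs.
wider = wider_next_dense(layer, start_dim=16, total_dim=16, n_add=8)
weight, bias = wider.get_weights()
print(wider.input_units, wider.units)  # 24 4
print(weight.shape)                    # (4, 24)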