How to use the autokeras.constant.Constant class in autokeras

To help you get started, we’ve selected a few autokeras examples, based on popular ways it is used in public projects.


github keras-team / autokeras / tests / image / test_dcgan.py (view on GitHub)
def test_fit_generate(_):  # `_` absorbs a dependency patched by the test's mock decorator (not shown)
    # Shrink the search and training constants so the test finishes quickly.
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 4
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    Constant.DATA_AUGMENTATION = False
    image_path, size = TEST_TEMP_DIR, 32
    clean_dir(image_path)
    dcgan = DCGAN(gen_training_result=(image_path, size))
    train_x = np.random.rand(100, 32, 32, 3)
    dcgan.fit(train_x)
    clean_dir(image_path)
    noise = np.random.randn(32, 100, 1, 1).astype('float32')
    dcgan.generate(noise)
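
The pattern in this test, assigning to class attributes on autokeras.constant.Constant before any model is created, is the standard way to bound a search for smoke tests. Because the attributes are process-global, a common refinement is to snapshot and restore them. A minimal sketch using the attribute names from the test (in the 0.x sources, T_MIN is the searcher's simulated-annealing temperature floor):

from autokeras.constant import Constant

# Snapshot the defaults so they can be restored after the fast run.
_saved = {name: getattr(Constant, name)
          for name in ('MAX_ITER_NUM', 'MAX_MODEL_NUM', 'T_MIN', 'DATA_AUGMENTATION')}

Constant.MAX_ITER_NUM = 1           # one training epoch per candidate model
Constant.MAX_MODEL_NUM = 4          # evaluate at most four candidate models
Constant.T_MIN = 0.8                # a higher annealing floor ends the search sooner
Constant.DATA_AUGMENTATION = False  # skip augmentation for speed

try:
    pass  # ... run the search or the test body here ...
finally:
    for name, value in _saved.items():
        setattr(Constant, name, value)
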
github keras-team / autokeras / autokeras / supervised.py (view on GitHub)
def fit(self, x, y, trainer_args=None, retrain=False):
    """Trains the model on the given dataset.

    Args:
        x: A numpy.ndarray instance containing the training data, or the
            training data combined with the validation data.
        y: A numpy.ndarray instance containing the labels of the training
            data, or the labels of the training data combined with the
            validation labels.
        trainer_args: A dictionary containing the parameters of the
            ModelTrainer constructor.
        retrain: A boolean indicating whether to reinitialize the weights
            of the model.
    """
    x = self.preprocess(x)
    # Split the training data into training and validation sets.
    validation_set_size = int(len(y) * Constant.VALIDATION_SET_SIZE)
    validation_set_size = min(validation_set_size, 500)
    validation_set_size = max(validation_set_size, 1)
    x_train, x_test, y_train, y_test = train_test_split(x, y,
                                                        test_size=validation_set_size,
                                                        random_state=42)
    if trainer_args is None:
        trainer_args = {'max_no_improvement_num': 30}

    y_train = self.transform_y(y_train)
    y_test = self.transform_y(y_test)

    train_data = self.data_transformer.transform_train(x_train, y_train)
    test_data = self.data_transformer.transform_test(x_test, y_test)

    if retrain:
        self.graph.weighted = False
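
The validation split above is a fixed fraction of the data, Constant.VALIDATION_SET_SIZE, clamped to between 1 and 500 samples before being handed to scikit-learn. The same logic as a standalone helper:

from sklearn.model_selection import train_test_split
from autokeras.constant import Constant

def split_validation(x, y):
    """Reproduce the clamped validation split used in fit() above."""
    size = int(len(y) * Constant.VALIDATION_SET_SIZE)
    size = max(min(size, 500), 1)  # at least 1 sample, at most 500
    return train_test_split(x, y, test_size=size, random_state=42)
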
github keras-team / autokeras / autokeras / predefined_model.py (view on GitHub)
"""Trains the model on the dataset given.

        Args:
            x_train: A numpy.ndarray instance containing the training data,
                or the training data combined with the validation data.
            y_train: A numpy.ndarray instance containing the label of the training data,
                or the label of the training data combined with the validation label.
            time_limit: A dictionary containing the parameters of the ModelTrainer constructor.
        """
        validate_xy(x_train, y_train)
        self.resize_shape = compute_image_resize_params(x_train)
        x_train = self.preprocess(x_train)
        self.y_encoder.fit(y_train)
        y_train = self.transform_y(y_train)
        # Divide training data into training and testing data.
        validation_set_size = int(len(y_train) * Constant.VALIDATION_SET_SIZE)
        validation_set_size = min(validation_set_size, 500)
        validation_set_size = max(validation_set_size, 1)
        x_train_new, x_test, y_train_new, y_test = train_test_split(x_train, y_train,
                                                                    test_size=validation_set_size,
                                                                    random_state=42)

        # initialize data_transformer
        self.data_transformer = Backend.get_image_transformer(x_train)
        # Wrap the data into DataLoaders
        train_loader = self.data_transformer.transform_train(x_train_new, y_train_new)
        test_loader = self.data_transformer.transform_test(x_test, y_test)

        self.generator = self._init_generator(self.y_encoder.n_classes, x_train_new.shape[1:])
        graph = self.generator.generate()

        if time_limit is None:
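
The label pipeline here (y_encoder.fit followed by transform_y) behaves like a scikit-learn-style one-hot encoder. A self-contained stand-in that mirrors that contract, assuming one-hot targets (the real encoder class in the 0.x tree may differ):

import numpy as np

class SimpleOneHotEncoder:
    """Stand-in for the y_encoder above: fit() learns the classes,
    transform() maps labels to one-hot rows."""

    def fit(self, y):
        self.classes_ = np.unique(y)
        self.n_classes = len(self.classes_)

    def transform(self, y):
        idx = np.searchsorted(self.classes_, y)
        return np.eye(self.n_classes)[idx]

enc = SimpleOneHotEncoder()
enc.fit(['cat', 'dog', 'cat'])
print(enc.transform(['dog', 'cat']))  # [[0. 1.], [1. 0.]]
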
github keras-team / autokeras / autokeras / nn / generator.py (view on GitHub)
def generate(self, model_len=None, model_width=None):
    if model_width is None:
        model_width = Constant.MODEL_WIDTH
    graph = Graph(self.input_shape, False)
    temp_input_channel = self.input_shape[-1]
    output_node_id = 0
    # Stem: conv -> batch norm -> ReLU.
    output_node_id = graph.add_layer(self.conv(temp_input_channel, model_width, kernel_size=3), output_node_id)
    output_node_id = graph.add_layer(self.batch_norm(model_width), output_node_id)
    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    # output_node_id = graph.add_layer(self.pooling(kernel_size=3, stride=2, padding=1), output_node_id)

    # Four stages; the width doubles between stages (the final argument is the stride).
    output_node_id = self._make_layer(graph, model_width, self.layers[0], output_node_id, 1)
    model_width *= 2
    output_node_id = self._make_layer(graph, model_width, self.layers[1], output_node_id, 2)
    model_width *= 2
    output_node_id = self._make_layer(graph, model_width, self.layers[2], output_node_id, 2)
    model_width *= 2
    output_node_id = self._make_layer(graph, model_width, self.layers[3], output_node_id, 2)
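
The Graph API used throughout these generators is append-only: add_layer returns the id of the newly created output node, which is threaded into the next call. A minimal sketch of the same stem pattern (the import paths and the StubConv2d/StubBatchNormalization2d names follow the 0.x tree and should be treated as assumptions):

from autokeras.nn.graph import Graph
from autokeras.nn.layers import StubConv2d, StubBatchNormalization2d, StubReLU  # assumed 0.x paths

def tiny_conv_stem(input_shape, width):
    """Build a conv -> batch-norm -> ReLU stem, like the generate() above."""
    graph = Graph(input_shape, False)  # False: no trained weights attached yet
    node = 0                           # node 0 is the graph's input
    node = graph.add_layer(StubConv2d(input_shape[-1], width, kernel_size=3), node)
    node = graph.add_layer(StubBatchNormalization2d(width), node)
    node = graph.add_layer(StubReLU(), node)
    return graph
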
github keras-team / autokeras / autokeras / text / text_preprocessor.py (view on GitHub)
def load_pretrain(path, word_index):  # name and signature reconstructed from the Args below
    """Load the pretrained embedding weights.

    Args:
        path: String, path to store the pretrained files.
        word_index: A dictionary mapping each word to its tokenized index.

    Returns:
        embedding_matrix: Numpy array used as the pretrained embedding-layer weights.
    """
    print("loading pretrained weights...")
    file_path = os.path.join(path, Constant.FILE_PATH)
    extract_path = os.path.join(path, Constant.EXTRACT_PATH)
    download_pre_train(file_path=file_path, extract_path=extract_path)
    embedding_index = read_embedding_index(extract_path)
    print('Total %s word vectors embedded.' % len(embedding_index))

    # Convert the pretrained embedding index to layer weights; words without
    # a pretrained vector keep their random initialization.
    embedding_matrix = np.random.random((len(word_index) + 1, Constant.EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
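
Rows of the returned matrix line up with the tokenized indices, with row 0 reserved (hence len(word_index) + 1); any word missing from the pretrained index keeps its random initialization. A toy check of that layout, using a hypothetical two-word vocabulary:

import numpy as np

word_index = {'hello': 1, 'world': 2}  # hypothetical toy vocabulary
emb_dim = 4
matrix = np.random.random((len(word_index) + 1, emb_dim))

known = {'hello': np.ones(emb_dim)}    # pretend only 'hello' has a pretrained vector
for word, i in word_index.items():
    vector = known.get(word)
    if vector is not None:
        matrix[i] = vector

assert np.allclose(matrix[1], 1.0)     # row 1 now holds the pretrained vector
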
github keras-team / autokeras / autokeras / image / image_supervised.py (view on GitHub)
def __init__(self, augment=None, **kwargs):  # signature inferred from the body below
    """Initialize the instance.

    The classifier will be loaded from the files in 'path' if the parameter
    'resume' is True. Otherwise, a new one will be created.

    Args:
        augment: A boolean value indicating whether the data needs augmentation. If not defined,
            the value of Constant.DATA_AUGMENTATION (True by default) is used.
        **kwargs: Passed through to the __init__() function of ImageSupervised's superclass:
            verbose: A boolean of whether the search process will be printed to stdout.
            path: A string of the path to a directory where the intermediate results are saved.
            resume: A boolean. If True, the classifier will continue the previous work saved in path.
                Otherwise, the classifier will start a new search.
            searcher_args: A dictionary containing the parameters for the searcher's __init__ function.
    """
    self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION
    self.resize_shape = []

    super().__init__(**kwargs)
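
These kwargs surface in the public constructors. For instance, a hedged 0.x-style sketch of starting a search and later resuming it from the same directory (the import path is an assumption):

from autokeras.image.image_supervised import ImageClassifier  # assumed 0.x import path

# Start a fresh search, saving intermediate results under /tmp/autokeras_run.
clf = ImageClassifier(path='/tmp/autokeras_run', resume=False,
                      augment=False, verbose=True)

# Later, pick up the same search where it left off.
clf = ImageClassifier(path='/tmp/autokeras_run', resume=True)
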
github keras-team / autokeras / autokeras / backend / torch / model_trainer.py (view on GitHub)
def train_model(self,
                lr=0.001,  # opening lines of the signature were cut from the excerpt; the lr default is illustrative
                max_iter_num=None,
                max_no_improvement_num=None,
                timeout=None):
    """Train the model.

    Training stops when either max_iter_num or max_no_improvement_num is reached.

    Args:
        lr: The learning rate for training.
        timeout: Timeout in seconds.
        max_iter_num: An integer. The maximum number of epochs to train the model.
            The training will stop when this number is reached.
        max_no_improvement_num: An integer. The maximum number of epochs during which
            the loss value doesn't decrease. The training will stop when this number is reached.

    Returns:
        A tuple of the loss values and the metric value.
    """
    if max_iter_num is None:
        max_iter_num = Constant.MAX_ITER_NUM

    if max_no_improvement_num is None:
        max_no_improvement_num = Constant.MAX_NO_IMPROVEMENT_NUM

    self.early_stop = EarlyStop(max_no_improvement_num)
    self.early_stop.on_train_begin()
    self._timeout = time.time() + timeout if timeout is not None else sys.maxsize

    test_metric_value_list = []
    test_loss_list = []
    self.optimizer = torch.optim.SGD(
        self.model.parameters(),
        lr=lr,
        momentum=0.9,
        weight_decay=3e-4)
    # self.optimizer = torch.optim.Adam(self.model.parameters())
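
EarlyStop above tracks the best loss seen so far and signals once it has gone max_no_improvement_num epochs without improvement. A self-contained stand-in with the same contract (on_epoch_end is an assumed method name, chosen to match the Keras-style callback naming the snippet hints at):

class SimpleEarlyStop:
    """Minimal early-stopping tracker: stop after `patience` epochs without improvement."""

    def __init__(self, patience):
        self.patience = patience

    def on_train_begin(self):
        self.best_loss = float('inf')
        self.no_improvement = 0

    def on_epoch_end(self, loss):
        """Return True to keep training, False to stop."""
        if loss < self.best_loss:
            self.best_loss = loss
            self.no_improvement = 0
        else:
            self.no_improvement += 1
        return self.no_improvement < self.patience
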
github keras-team / autokeras / autokeras / nn / generator.py (view on GitHub)
"""
        if model_len is None:
            model_len = Constant.MODEL_LEN
        if model_width is None:
            model_width = Constant.MODEL_WIDTH
        if isinstance(model_width, list) and not len(model_width) == model_len:
            raise ValueError('The length of \'model_width\' does not match \'model_len\'')
        elif isinstance(model_width, int):
            model_width = [model_width] * model_len

        graph = Graph(self.input_shape, False)
        output_node_id = 0
        n_nodes_prev_layer = self.input_shape[0]
        for width in model_width:
            output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)
            output_node_id = graph.add_layer(StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id)
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            n_nodes_prev_layer = width

        graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)
        return graph
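
The width handling at the top of generate() broadcasts a single integer to every hidden layer and validates an explicit list against model_len. In isolation:

def normalize_widths(model_width, model_len):
    """Mirror the generator's width handling: int -> repeated list, list -> validated."""
    if isinstance(model_width, int):
        return [model_width] * model_len
    if len(model_width) != model_len:
        raise ValueError("The length of 'model_width' does not match 'model_len'")
    return list(model_width)

print(normalize_widths(64, 3))        # [64, 64, 64]
print(normalize_widths([32, 64], 2))  # [32, 64]
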
github keras-team / autokeras / autokeras / net_transformer.py (view on GitHub)
def transform(graph, skip_conn=True):
    graphs = []
    for _ in range(Constant.N_NEIGHBOURS * 2):
        # Randomly pick a mutation: 0 = deeper, 1 = wider, 2 = add a skip connection.
        a = randrange(3 if skip_conn else 2)

        temp_graph = None
        if a == 0:
            temp_graph = to_deeper_graph(deepcopy(graph))
        elif a == 1:
            temp_graph = to_wider_graph(deepcopy(graph))
        elif a == 2:
            temp_graph = to_skip_connection_graph(deepcopy(graph))

        # Keep the mutant only if it stays within the model-size budget.
        if temp_graph is not None and temp_graph.size() <= Constant.MAX_MODEL_SIZE:
            graphs.append(temp_graph)

        if len(graphs) >= Constant.N_NEIGHBOURS:
            break

    return graphs
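
Stripped of the graph specifics, transform() is a rejection-sampling loop: try up to 2 * N_NEIGHBOURS random mutations, keep the valid ones, and stop once N_NEIGHBOURS survivors are collected. The same skeleton on a toy domain:

from copy import deepcopy
from random import randrange

def sample_neighbours(item, mutations, n_neighbours):
    """Collect up to n_neighbours valid mutants in at most 2*n_neighbours tries."""
    out = []
    for _ in range(n_neighbours * 2):
        mutated = mutations[randrange(len(mutations))](deepcopy(item))
        if mutated is not None:  # a mutation may be rejected (returns None)
            out.append(mutated)
        if len(out) >= n_neighbours:
            break
    return out

# Toy usage: "mutate" an integer instead of a network graph.
print(sample_neighbours(10, [lambda x: x + 1, lambda x: x * 2], 3))
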
github keras-team / autokeras / autokeras / pretrained / face_detector.py (view on GitHub)
def _google_drive_files(self):
    # The Google Drive file specs for the pretrained face-detector weights.
    return Constant.FACE_DETECTOR_MODELS