How to use the autokeras.pretrained.voice_generator.voice_generator.Hparams class in autokeras

To help you get started, we’ve selected a few autokeras examples based on popular ways Hparams is used in public projects.


github keras-team / autokeras / autokeras / pretrained / voice_generator / voice_generator.py
def build_model():
    model = getattr(builder, Hparams.builder)(
        n_speakers=Hparams.n_speakers,
        speaker_embed_dim=Hparams.speaker_embed_dim,
        n_vocab=frontend.n_vocab,
        embed_dim=Hparams.text_embed_dim,
        mel_dim=Hparams.num_mels,
        linear_dim=Hparams.fft_size // 2 + 1,
        r=Hparams.outputs_per_step,
        padding_idx=Hparams.padding_idx,
        dropout=Hparams.dropout,
        kernel_size=Hparams.kernel_size,
        encoder_channels=Hparams.encoder_channels,
        decoder_channels=Hparams.decoder_channels,
        converter_channels=Hparams.converter_channels,
        use_memory_mask=Hparams.use_memory_mask,
        trainable_positional_encodings=Hparams.trainable_positional_encodings,
        force_monotonic_attention=Hparams.force_monotonic_attention,
        use_decoder_state_for_postnet_input=Hparams.use_decoder_state_for_postnet_input,
        max_positions=Hparams.max_positions,
        freeze_embedding=Hparams.freeze_embedding,
        window_ahead=Hparams.window_ahead,
        window_backward=Hparams.window_backward
    )
    return model
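Hparams is not called like a function here; it is a plain configuration class, and every hyperparameter is read straight off its class attributes. A minimal sketch of that pattern (the override value below is illustrative, not an autokeras default):

from autokeras.pretrained.voice_generator.voice_generator import Hparams

# Hyperparameters live as class attributes, so they can be inspected or
# overridden before the model is built; build_model() reads Hparams at
# call time, so the change takes effect on the next build.
print(Hparams.num_mels, Hparams.fft_size, Hparams.outputs_per_step)
Hparams.dropout = 0.05  # illustrative override
model = build_model()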
github keras-team / autokeras / autokeras / pretrained / voice_generator / voice_generator.py
def _lws_processor():
    return lws.lws(Hparams.fft_size, Hparams.hop_size, mode="speech")
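The object returned by lws.lws is an STFT processor that can also estimate phase from a magnitude spectrogram. A hedged sketch of a round trip with it, following the lws package's documented stft / run_lws / istft methods (the random input is only a stand-in for a real waveform):

import numpy as np
import lws

processor = lws.lws(Hparams.fft_size, Hparams.hop_size, mode="speech")

y = np.random.randn(Hparams.sample_rate).astype(np.float64)  # ~1 s stand-in signal
magnitude = np.abs(processor.stft(y))        # magnitude-only spectrogram
complex_spec = processor.run_lws(magnitude)  # LWS phase estimation
y_hat = processor.istft(complex_spec)        # back to a waveform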
github keras-team / autokeras / autokeras / pretrained / voice_generator / voice_generator.py
def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sample_rate = 0
        self.hop_length = 0
        self.sample_rate = Hparams.sample_rate
        self.hop_length = Hparams.hop_size

        self.model = self.load_checkpoint()
        self.model.to(self.device)
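The constructor caches two audio settings from Hparams: the output sample rate and the STFT hop size. The same sample rate has to be used when the synthesized waveform is written to disk; a minimal sketch with scipy, where wav stands in for a generated waveform:

import numpy as np
from scipy.io import wavfile

wav = np.zeros(Hparams.sample_rate, dtype=np.float32)  # placeholder: one second of silence
wavfile.write("generated.wav", Hparams.sample_rate, wav)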
github keras-team / autokeras / autokeras / pretrained / voice_generator / voice_generator.py
def inv_preemphasis(x, coef=Hparams.preemphasis):
    """Inverse operation of pre-emphasis

    Args:
        x (1d-array): Input signal.
        coef (float): Pre-emphasis coefficient.

    Returns:
        array: Output filtered signal.

    See also:
        :func:`preemphasis`
    """
    b = np.array([1.], x.dtype)
    a = np.array([1., -coef], x.dtype)
    return signal.lfilter(b, a, x)
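The docstring points at a companion preemphasis function. A minimal sketch of that forward filter, written here for completeness rather than quoted from autokeras, using the same scipy.signal.lfilter convention:

import numpy as np
from scipy import signal

def preemphasis(x, coef=Hparams.preemphasis):
    # y[n] = x[n] - coef * x[n - 1]; inv_preemphasis above undoes this filter.
    b = np.array([1., -coef], x.dtype)
    a = np.array([1.], x.dtype)
    return signal.lfilter(b, a, x)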