:return: A Keras model instance.
"""
if not (len(conv_kernel_width) == len(conv_filters) == len(new_maps) == len(pooling_width)):
    raise ValueError(
        "conv_kernel_width, conv_filters, new_maps and pooling_width must have the same length")
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
deep_emb_list, _ = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, init_std, seed)
fg_deep_emb_list, _ = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, init_std, seed,
prefix='fg')
fg_input = concat_func(fg_deep_emb_list, axis=1)
origin_input = concat_func(deep_emb_list, axis=1)
if len(conv_filters) > 0:
    new_features = FGCNNLayer(
        conv_filters, conv_kernel_width, new_maps, pooling_width)(fg_input)
    combined_input = concat_func([origin_input, new_features], axis=1)
else:
    combined_input = origin_input
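# Pairwise interaction signal: Lambda(unstack) splits combined_input along the field axis
# into a list of per-field embeddings, and InnerProductLayer computes the inner product of
# every pair of fields (IPNN-style); the flattened products are then concatenated with the
# flattened combined features to form the DNN input.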
inner_product = tf.keras.layers.Flatten()(InnerProductLayer()(
tf.keras.layers.Lambda(unstack, mask=[None] * int(combined_input.shape[1]))(combined_input)))
linear_signal = tf.keras.layers.Flatten()(combined_input)
dnn_input = tf.keras.layers.Concatenate()([linear_signal, inner_product])
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_att: float. L2 regularizer strength applied to attention net
:param afm_dropout: float in [0,1), Fraction of the attention net output units to drop out.
:param init_std: float, standard deviation used to initialize the embedding vectors
:param seed: integer, to use as random seed.
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
group_embedding_dict, _ = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, init_std,
seed, support_dense=False, support_group=True)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
if use_attention:
    fm_logit = add_func([AFMLayer(attention_factor, l2_reg_att, afm_dropout,
                                  seed)(list(v)) for k, v in group_embedding_dict.items() if k in fm_group])
else:
    fm_logit = add_func([FM()(concat_func(v, axis=1))
                         for k, v in group_embedding_dict.items() if k in fm_group])
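# With use_attention=True, AFMLayer weights each pairwise embedding interaction with a
# learned attention score before summing them; otherwise a plain FM layer sums the
# pairwise interactions with equal weight.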
final_logit = add_func([linear_logit, fm_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
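# Minimal usage sketch (an assumption, not part of the original source): the object built
# above is an ordinary tf.keras Model whose named inputs come from the feature columns,
# so it can be compiled and trained like any Keras model, e.g.
#   model.compile(optimizer="adam", loss="binary_crossentropy")
#   model.fit(train_input, train_labels, batch_size=256, epochs=10)
# where `train_input` is a hypothetical {feature_name: np.ndarray} dict and `train_labels`
# is the matching label array.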
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param dnn_use_bn: bool. Whether to use BatchNormalization before activation in DNN
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param init_std: float, standard deviation used to initialize the embedding vectors
:param seed: integer, to use as random seed.
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
if len(dnn_hidden_units) <= 0 and att_layer_num <= 0:
    raise ValueError("Either dnn_hidden_units or att_layer_num must be greater than 0")
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
att_input = concat_func(sparse_embedding_list, axis=1)
for _ in range(att_layer_num):
    att_input = InteractingLayer(
        att_embedding_size, att_head_num, att_res)(att_input)
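# Each InteractingLayer applies multi-head self-attention over the field embeddings, so
# stacking att_layer_num of them lets the model capture progressively higher-order
# feature interactions (the AutoInt mechanism).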
att_output = tf.keras.layers.Flatten()(att_input)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
if len(dnn_hidden_units) > 0 and att_layer_num > 0:  # Deep & Interacting Layer
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)
:param l2_reg_cin: L2 regularizer strength applied to CIN.
:param init_std: float, standard deviation used to initialize the embedding vectors
:param seed: integer, to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param dnn_use_bn: bool. Whether to use BatchNormalization before activation in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
fm_input = concat_func(sparse_embedding_list, axis=1)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
final_logit = add_func([linear_logit, dnn_logit])
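# When cin_layer_size is non-empty, the branch below adds a CIN (Compressed Interaction
# Network) logit, which models explicit vector-wise high-order feature interactions on
# top of the linear and DNN parts (the xDeepFM design).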
if len(cin_layer_size) > 0:
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param init_std: float, standard deviation used to initialize the embedding vectors
:param seed: integer, to use as random seed.
:param bi_dropout: When not ``None``, the probability we will drop out the output of the BiInteractionPooling layer.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in deep net
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
fm_input = concat_func(sparse_embedding_list, axis=1)
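# BiInteractionPooling compresses the pairwise element-wise products of the field
# embeddings into a single k-dimensional vector, 0.5 * ((sum_i e_i)^2 - sum_i e_i^2),
# as in NFM.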
bi_out = BiInteractionPooling()(fm_input)
if bi_dropout:
    bi_out = tf.keras.layers.Dropout(bi_dropout)(bi_out, training=None)
dnn_input = combined_dnn_input([bi_out], dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
final_logit = add_func([linear_logit, dnn_logit])
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param init_std: float, standard deviation used to initialize the embedding vectors
:param seed: integer, to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param dnn_use_bn: bool. Whether to use BatchNormalization before activation in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
group_embedding_dict, dense_value_list = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding,
init_std, seed, support_group=True)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
fm_logit = add_func([FM()(concat_func(v, axis=1))
for k, v in group_embedding_dict.items() if k in fm_group])
dnn_input = combined_dnn_input(list(chain.from_iterable(
group_embedding_dict.values())), dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
final_logit = add_func([linear_logit, fm_logit, dnn_logit])
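# DeepFM-style fusion: the final logit is the sum of the wide (linear) logit, the FM
# pairwise-interaction logit, and the deep network logit, all learned on shared
# embeddings.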
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_linear: float. L2 regularizer strength applied to linear weight
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param init_std: float, standard deviation used to initialize the embedding vectors
:param seed: integer, to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, False, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
final_logit = add_func([dnn_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
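# PredictionLayer turns the summed logit into the model output: a sigmoid for
# task='binary', the raw value for task='regression'.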
model = tf.keras.models.Model(inputs=inputs_list,
outputs=output)
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
if not (len(conv_kernel_width) == len(conv_filters) == len(new_maps) == len(pooling_width)):
    raise ValueError(
        "conv_kernel_width, conv_filters, new_maps and pooling_width must have the same length")
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
deep_emb_list, _ = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, init_std, seed)
fg_deep_emb_list, _ = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, init_std, seed,
prefix='fg')
fg_input = concat_func(fg_deep_emb_list, axis=1)
origin_input = concat_func(deep_emb_list, axis=1)
if len(conv_filters) > 0:
    new_features = FGCNNLayer(
        conv_filters, conv_kernel_width, new_maps, pooling_width)(fg_input)
    combined_input = concat_func([origin_input, new_features], axis=1)
else:
    combined_input = origin_input
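# FGCNNLayer runs each conv_filters/conv_kernel_width convolution with max pooling of
# width pooling_width, followed by a recombination (dense) step that generates the
# new_maps new feature embeddings for that layer; the generated features act as extra
# fields alongside the original embeddings.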
inner_product = tf.keras.layers.Flatten()(InnerProductLayer()(
tf.keras.layers.Lambda(unstack, mask=[None] * int(combined_input.shape[1]))(combined_input)))
linear_signal = tf.keras.layers.Flatten()(combined_input)
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param init_std: float, standard deviation used to initialize the embedding vectors
:param seed: integer, to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = Dense(
1, use_bias=False, activation=None)(dnn_out)
final_logit = add_func([dnn_logit, linear_logit])
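# This variant sums only the wide (linear) logit and the DNN logit; there is no explicit
# pairwise-interaction term, so all feature crossing is left to the fully connected
# layers.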
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
:param dnn_activation: Activation function to use in DNN
:param use_inner: bool, whether to use the inner product or not.
:param use_outter: bool, whether to use the outer product or not.
:param kernel_type: str, kernel type used in the outer product; can be ``'mat'``, ``'vec'`` or ``'num'``
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
if kernel_type not in ['mat', 'vec', 'num']:
    raise ValueError("kernel_type must be 'mat', 'vec' or 'num'")
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
inner_product = tf.keras.layers.Flatten()(
InnerProductLayer()(sparse_embedding_list))
outter_product = OutterProductLayer(kernel_type)(sparse_embedding_list)
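# InnerProductLayer yields the pairwise inner products between field embeddings (IPNN),
# while OutterProductLayer forms pairwise outer products parameterized by kernel_type
# (OPNN); the use_inner / use_outter flags below choose which signals join the deep input.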
# ipnn deep input
linear_signal = tf.keras.layers.Reshape(
[len(sparse_embedding_list) * embedding_size])(concat_func(sparse_embedding_list))
if use_inner and use_outter:
    deep_input = tf.keras.layers.Concatenate()(
        [linear_signal, inner_product, outter_product])
elif use_inner:
    deep_input = tf.keras.layers.Concatenate()(
        [linear_signal, inner_product])
elif use_outter: