Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the enclosing `def` header is outside this view and
# the original indentation appears to have been lost in extraction; code is
# left byte-identical. Structure matches a DeepFM-style model: final logit is
# the sum of a linear term, an FM term over grouped embeddings, and a DNN
# term — confirm against the full file.
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
# One Keras Input tensor per feature, in feature-dict order.
inputs_list = list(features.values())
# support_group=True: embeddings are returned grouped (mapping group -> list
# of embedding tensors) so the FM term can be computed per group below.
group_embedding_dict, dense_value_list = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding,
init_std, seed, support_group=True)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
# FM logit: apply FM() to each embedding group whose key is in fm_group,
# then sum the per-group logits.
fm_logit = add_func([FM()(concat_func(v, axis=1))
for k, v in group_embedding_dict.items() if k in fm_group])
# DNN consumes every group's embeddings (flattened across groups) plus the
# dense-value inputs.
dnn_input = combined_dnn_input(list(chain.from_iterable(
group_embedding_dict.values())), dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
# Bias-free 1-unit projection turns the DNN output into a scalar logit.
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
# Final logit = linear + FM + DNN contributions.
final_logit = add_func([linear_logit, fm_logit, dnn_logit])
# PredictionLayer maps the logit to the task's output (e.g. sigmoid for
# binary — presumably; semantics live in PredictionLayer, outside this view).
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
# NOTE(review): fragment of an AutoInt-style builder (multi-head
# self-attention via InteractingLayer); `def` header and the code that
# defines `features` are outside this view, and indentation has been lost.
# Code left byte-identical.
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
# Stack all sparse-feature embeddings along the field axis for attention.
att_input = concat_func(sparse_embedding_list, axis=1)
# Apply att_layer_num stacked interacting (self-attention) layers.
for _ in range(att_layer_num):
att_input = InteractingLayer(
att_embedding_size, att_head_num, att_res)(att_input)
att_output = tf.keras.layers.Flatten()(att_input)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
# Four-way dispatch on which towers are enabled: both, DNN only,
# attention only, or neither (invalid configuration).
if len(dnn_hidden_units) > 0 and att_layer_num > 0: # Deep & Interacting Layer
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
stack_out = tf.keras.layers.Concatenate()([att_output, deep_out])
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(stack_out)
elif len(dnn_hidden_units) > 0: # Only Deep
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
elif att_layer_num > 0: # Only Interacting Layer
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(att_output)
else: # Error
raise NotImplementedError
# Add the wide/linear logit to whichever deep logit was built above.
final_logit = add_func([final_logit, linear_logit])
# NOTE(review): fragment of an NFM-style builder (BiInteractionPooling).
# The line below is a dangling continuation of a `build_input_features(`
# call whose opening is outside this view; indentation has been lost.
# Code left byte-identical.
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
# Stack embeddings along the field axis, then compress pairwise
# interactions into a single vector with Bi-Interaction pooling.
fm_input = concat_func(sparse_embedding_list, axis=1)
bi_out = BiInteractionPooling()(fm_input)
# Optional dropout on the pooled interaction vector; training=None defers
# the training/inference switch to Keras' learning phase.
if bi_dropout:
bi_out = tf.keras.layers.Dropout(bi_dropout)(bi_out, training=None)
dnn_input = combined_dnn_input([bi_out], dense_value_list)
# Note: batch-norm flag is hard-coded to False for this DNN.
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
# Final logit = linear + DNN(Bi-Interaction) contributions.
final_logit = add_func([linear_logit, dnn_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
"""
if len(dnn_hidden_units) == 0 and cross_num == 0:
raise ValueError("Either hidden_layer or cross layer must > 0")
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
if len(dnn_hidden_units) > 0 and cross_num > 0: # Deep & Cross
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(dnn_input)
stack_out = tf.keras.layers.Concatenate()([cross_out, deep_out])
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(stack_out)
elif len(dnn_hidden_units) > 0: # Only Deep
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
elif cross_num > 0: # Only Cross
cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(dnn_input)
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(cross_out)
else: # Error
raise NotImplementedError
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
# NOTE(review): fragment — the `def` header and the start of this docstring
# are outside this view, and indentation has been lost. Structure matches a
# plain linear + DNN model (FNN/WDL-style — confirm against the full file).
# Code left byte-identical.
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
# Note: batch-norm flag is hard-coded to False for this DNN.
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, False, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
# Final logit = DNN + linear contributions.
final_logit = add_func([dnn_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list,
outputs=output)
return model
# NOTE(review): fragment of an FGCNN/inner-product (PNN-style) builder; the
# `def` header and the definitions of fg_deep_emb_list / deep_emb_list /
# inputs_list / linear_logit / unstack are outside this view, and
# indentation has been lost. Code left byte-identical.
fg_input = concat_func(fg_deep_emb_list, axis=1)
origin_input = concat_func(deep_emb_list, axis=1)
# When conv filters are configured, FGCNN generates new feature maps from
# fg_input and appends them to the original embeddings along the field axis.
if len(conv_filters) > 0:
new_features = FGCNNLayer(
conv_filters, conv_kernel_width, new_maps, pooling_width)(fg_input)
combined_input = concat_func([origin_input, new_features], axis=1)
else:
combined_input = origin_input
# Pairwise inner products over the (un-stacked) field embeddings; the mask
# list length assumes combined_input's axis 1 is the field count —
# presumably static here, since int() requires a known dimension.
inner_product = tf.keras.layers.Flatten()(InnerProductLayer()(
tf.keras.layers.Lambda(unstack, mask=[None] * int(combined_input.shape[1]))(combined_input)))
linear_signal = tf.keras.layers.Flatten()(combined_input)
# DNN input = raw (flattened) embeddings concatenated with inner products.
dnn_input = tf.keras.layers.Concatenate()([linear_signal, inner_product])
dnn_input = tf.keras.layers.Flatten()(dnn_input)
final_logit = DNN(dnn_hidden_units, dropout_rate=dnn_dropout,
l2_reg=l2_reg_dnn)(dnn_input)
final_logit = tf.keras.layers.Dense(1, use_bias=False)(final_logit)
final_logit = add_func([final_logit,linear_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
# NOTE(review): fragment of a DIEN-style builder (interest evolution over a
# user-behavior sequence). The first line belongs to an `if use_negsampling:`
# branch whose header is outside this view; indentation has been lost.
# Code left byte-identical.
neg_concat_behavior = concat_func(neg_uiseq_embed_list)
else:
neg_concat_behavior = None
# interest_evolution returns the evolved interest state plus an auxiliary
# loss (used when negative sampling is enabled — presumably added to the
# model elsewhere; not visible in this fragment).
hist, aux_loss_1 = interest_evolution(keys_emb, query_emb, user_behavior_length, gru_type=gru_type,
use_neg=use_negsampling, neg_concat_behavior=neg_concat_behavior,
att_hidden_size=att_hidden_units,
att_activation=att_activation,
att_weight_normalization=att_weight_normalization, )
# Append the evolved interest representation to the other deep inputs.
deep_input_emb = Concatenate()([deep_input_emb, hist])
deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb)
dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, use_bn, seed)(dnn_input)
final_logit = Dense(1, use_bias=False)(output)
output = PredictionLayer(task)(final_logit)
#model_input_list = get_inputs_list(
# [sparse_input, dense_input, user_behavior_input])
model_input_list = inputs_list
#if use_negsampling:
# model_input_list += list(neg_user_behavior_input.values())
# The sequence-length input must also be fed at call time.
model_input_list += [user_behavior_length]
model = tf.keras.models.Model(inputs=model_input_list, outputs=output)
# NOTE(review): fragment boundary — the `if use_negsampling:` below is a
# dangling conditional whose body is not visible here (in DIEN-style code it
# typically adds the auxiliary loss; unverifiable from this view). The rest
# matches a DSIN-style builder (session interest + BiLSTM attention); its
# `def` header and most local definitions are outside this view, and
# indentation has been lost. Code left byte-identical.
if use_negsampling:
# Attention over session features keyed by the candidate (query) item.
interest_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True,
supports_masking=False)(
[query_emb, sess_fea, user_sess_length])
# 2-layer BiLSTM over session representations, then a second attention
# pooling over its outputs.
lstm_outputs = BiLSTM(len(sess_feature_list) * embedding_size,
layers=2, res_layers=0, dropout_rate=0.2, )(sess_fea)
lstm_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True)(
[query_emb, lstm_outputs, user_sess_length])
deep_input_emb = Concatenate()(
[deep_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer)])
# Append raw dense inputs, if any were supplied.
if len(dense_input) > 0:
deep_input_emb = Concatenate()(
[deep_input_emb] + list(dense_input.values()))
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed)(deep_input_emb)
output = Dense(1, use_bias=False, activation=None)(output)
output = PredictionLayer(task)(output)
# Collect one Input per session slot ("sess_0", "sess_1", ...).
sess_input_list = []
# sess_input_length_list = []
for i in range(sess_max_count):
sess_name = "sess_" + str(i)
sess_input_list.extend(get_inputs_list(
[user_behavior_input_dict[sess_name]]))
# sess_input_length_list.append(user_behavior_length_dict[sess_name])
model_input_list = get_inputs_list([sparse_input, dense_input]) + sess_input_list + [
user_sess_length]
model = Model(inputs=model_input_list, outputs=output)
from .activation import Dice
from .core import DNN, LocalActivationUnit, PredictionLayer
from .interaction import (CIN, FM, AFMLayer, BiInteractionPooling, CrossNet,
InnerProductLayer, InteractingLayer,
OutterProductLayer, FGCNNLayer,SENETLayer,BilinearInteraction)
from .normalization import LayerNormalization
from .sequence import (AttentionSequencePoolingLayer, BiasEncoding, BiLSTM,
KMaxPooling, SequencePoolingLayer,WeightedSequenceLayer,
Transformer, DynamicGRU)
from .utils import NoMask, Hash,Linear,Add
# Registry mapping layer names to the custom layer classes imported above,
# for passing as `custom_objects` when reloading saved Keras models that use
# these layers. NOTE(review): the dict literal continues beyond this view
# (no closing brace visible); entries left byte-identical.
custom_objects = {'tf': tf,
'InnerProductLayer': InnerProductLayer,
'OutterProductLayer': OutterProductLayer,
'DNN': DNN,
'PredictionLayer': PredictionLayer,
'FM': FM,
'AFMLayer': AFMLayer,
'CrossNet': CrossNet,
'BiInteractionPooling': BiInteractionPooling,
'LocalActivationUnit': LocalActivationUnit,
'Dice': Dice,
'SequencePoolingLayer': SequencePoolingLayer,
'AttentionSequencePoolingLayer': AttentionSequencePoolingLayer,
'CIN': CIN,
'InteractingLayer': InteractingLayer,
'LayerNormalization': LayerNormalization,
'BiLSTM': BiLSTM,
'Transformer': Transformer,
'NoMask': NoMask,
'BiasEncoding': BiasEncoding,