:param dnn_activation: Activation function to use in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
if len(dnn_hidden_units) == 0 and cross_num == 0:
    raise ValueError("Either dnn_hidden_units or cross_num must be greater than 0")
features = build_input_features(
    linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
if len(dnn_hidden_units) > 0 and cross_num > 0:  # Deep & Cross
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)
    cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(dnn_input)
    stack_out = tf.keras.layers.Concatenate()([cross_out, deep_out])
    final_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(stack_out)
elif len(dnn_hidden_units) > 0:  # Only Deep
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)
    final_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(deep_out)
elif cross_num > 0:  # Only Cross
    cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(dnn_input)
    final_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(cross_out)
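# The snippet above is truncated here. In the published DCN source the builder
# finishes by combining the cross/deep logit with the linear logit, the same
# pattern the other builders in this file follow (a sketch, assuming the
# surrounding variable names):
#
#     final_logit = add_func([final_logit, linear_logit])
#     output = PredictionLayer(task)(final_logit)
#     model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
#     return model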
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
group_embedding_dict, dense_value_list = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding,
init_std, seed, support_group=True)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
fm_logit = add_func([FM()(concat_func(v, axis=1))
for k, v in group_embedding_dict.items() if k in fm_group])
dnn_input = combined_dnn_input(list(chain.from_iterable(
group_embedding_dict.values())), dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
final_logit = add_func([linear_logit, fm_logit, dnn_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
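# Usage sketch (not part of the original snippet; the model name and input
# dicts are illustrative). A builder like the one above returns a plain Keras
# model, so training uses the standard compile/fit API:
#
#     model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')
#     model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
#     model.fit(train_model_input, train_labels,
#               batch_size=256, epochs=10, validation_split=0.2)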
features = build_input_features(
    linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
                                                                     l2_reg_embedding, init_std, seed)
senet_embedding_list = SENETLayer(
reduction_ratio, seed)(sparse_embedding_list)
senet_bilinear_out = BilinearInteraction(
bilinear_type=bilinear_type, seed=seed)(senet_embedding_list)
bilinear_out = BilinearInteraction(
bilinear_type=bilinear_type, seed=seed)(sparse_embedding_list)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(
[Flatten()(concat_func([senet_bilinear_out, bilinear_out]))], dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = Dense(
1, use_bias=False, activation=None)(dnn_out)
final_logit = add_func([linear_logit, dnn_logit])
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
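# Shape sketch for the FiBiNET block above (an assumption about the layer
# semantics, not taken from this snippet): with F sparse fields of embedding
# dim k, SENETLayer re-weights the F embeddings field-wise, and each
# BilinearInteraction produces F*(F-1)/2 pairwise interaction vectors of dim k,
# so the two branches together contribute 2 * F*(F-1)/2 * k units to the
# flattened DNN input (e.g. F=10, k=8 gives 2 * 45 * 8 = 720).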
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = Dense(
1, use_bias=False, activation=None)(dnn_out)
final_logit = add_func([dnn_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
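# What this builder computes, written out (a hedged restatement of the code
# above, not an addition to it): the wide part contributes a linear logit and
# the deep part a DNN logit, and for task='binary' the prediction is
#
#     y_hat = sigmoid(linear_logit + dnn_logit)
#
# i.e. the Wide & Deep formulation with both parts trained jointly.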
"""
features = build_input_features(
linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
fm_input = concat_func(sparse_embedding_list, axis=1)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
final_logit = add_func([linear_logit, dnn_logit])
if len(cin_layer_size) > 0:
    exFM_out = CIN(cin_layer_size, cin_activation,
                   cin_split_half, l2_reg_cin, seed)(fm_input)
    exFM_logit = tf.keras.layers.Dense(1, activation=None)(exFM_out)
    final_logit = add_func([final_logit, exFM_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
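# CIN configuration sketch (illustrative values, not from the snippet): with
# cin_layer_size=(128, 128) and cin_split_half=True, every CIN layer except the
# last keeps half of its feature maps for the next layer and routes the other
# half to the output, so exFM_out here would pool 64 + 128 feature maps before
# the final Dense(1) logit.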
linear_signal = tf.keras.layers.Reshape(
[len(sparse_embedding_list) * embedding_size])(concat_func(sparse_embedding_list))
if use_inner and use_outter:
    deep_input = tf.keras.layers.Concatenate()(
        [linear_signal, inner_product, outter_product])
elif use_inner:
    deep_input = tf.keras.layers.Concatenate()(
        [linear_signal, inner_product])
elif use_outter:
    deep_input = tf.keras.layers.Concatenate()(
        [linear_signal, outter_product])
else:
    deep_input = linear_signal
dnn_input = combined_dnn_input([deep_input], dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_out)
output = PredictionLayer(task)(dnn_logit)
model = tf.keras.models.Model(inputs=inputs_list,
outputs=output)
return model
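# Context sketch for the PNN snippet above, which starts mid-function
# (an assumption based on the product layers the model is built from):
# inner_product / outter_product come from pairwise product layers applied to
# the sparse embeddings earlier in the function, e.g.
#
#     inner_product = tf.keras.layers.Flatten()(
#         InnerProductLayer()(sparse_embedding_list))
#     outter_product = OutterProductLayer(kernel_type)(sparse_embedding_list)
#
# and embedding_size is the shared dimension of those embeddings.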
if use_negsampling:
    neg_concat_behavior = concat_func(neg_uiseq_embed_list)
else:
    neg_concat_behavior = None
hist, aux_loss_1 = interest_evolution(keys_emb, query_emb, user_behavior_length, gru_type=gru_type,
use_neg=use_negsampling, neg_concat_behavior=neg_concat_behavior,
att_hidden_size=att_hidden_units,
att_activation=att_activation,
att_weight_normalization=att_weight_normalization, )
deep_input_emb = Concatenate()([deep_input_emb, hist])
deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb)
dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, use_bn, seed)(dnn_input)
final_logit = Dense(1, use_bias=False)(output)
output = PredictionLayer(task)(final_logit)
#model_input_list = get_inputs_list(
# [sparse_input, dense_input, user_behavior_input])
model_input_list = inputs_list
#if use_negsampling:
# model_input_list += list(neg_user_behavior_input.values())
model_input_list += [user_behavior_length]
model = tf.keras.models.Model(inputs=model_input_list, outputs=output)
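# Completion sketch (an assumption, mirroring the published DIEN source): before
# returning, the builder registers the auxiliary loss produced by
# interest_evolution when negative sampling is enabled:
#
#     if use_negsampling:
#         model.add_loss(alpha * aux_loss_1)
#     return model
#
# where alpha weights the auxiliary loss against the main prediction loss.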
sess_fea = sess_interest_extractor(
tr_input, sess_max_count, Self_Attention)
interest_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True,
supports_masking=False)(
[query_emb, sess_fea, user_sess_length])
lstm_outputs = BiLSTM(hist_emb_size,
                      layers=2, res_layers=0, dropout_rate=0.2)(sess_fea)
lstm_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True)(
[query_emb, lstm_outputs, user_sess_length])
dnn_input_emb = Concatenate()(
[dnn_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer)])
dnn_input_emb = combined_dnn_input([dnn_input_emb], dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed)(dnn_input_emb)
output = Dense(1, use_bias=False, activation=None)(output)
output = PredictionLayer(task)(output)
sess_input_list = []
# sess_input_length_list = []
for i in range(sess_max_count):
    sess_name = "sess_" + str(i)
    sess_input_list.extend(get_inputs_list(
        [user_behavior_input_dict[sess_name]]))
    # sess_input_length_list.append(user_behavior_length_dict[sess_name])
model = Model(inputs=inputs_list + [user_sess_length], outputs=output)
return model
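# Shape note for the two attention branches above (hedged, based on the usual
# AttentionSequencePoolingLayer contract): given a (B, 1, k) query and a
# (B, T, k) key sequence, the layer returns a (B, 1, k) attention-pooled
# vector, which is why both branches pass through Flatten() before being
# concatenated onto dnn_input_emb.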
if fc_j.use_hash:
    j_input = Hash(fc_j.vocabulary_size)(j_input)
fc_i_embedding = feature_embedding(fc_i, fc_j, sparse_embedding, i_input)
fc_j_embedding = feature_embedding(fc_j, fc_i, sparse_embedding, j_input)
element_wise_prod = multiply([fc_i_embedding, fc_j_embedding])
if reduce_sum:
    element_wise_prod = Lambda(lambda element_wise_prod: K.sum(
        element_wise_prod, axis=-1))(element_wise_prod)
embed_list.append(element_wise_prod)
ffm_out = tf.keras.layers.Flatten()(concat_func(embed_list, axis=1))
if use_bn:
    ffm_out = tf.keras.layers.BatchNormalization()(ffm_out)
dnn_input = combined_dnn_input([ffm_out], dense_value_list)
dnn_out = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout)(dnn_input)
dnn_logit = Dense(1, use_bias=False)(dnn_out)
final_logit = add_func([dnn_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
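# Field-aware interaction, written out (restating the pair loop above): each
# feature i keeps a separate embedding per partner field, so for a pair (i, j)
# the model combines e_{i->field(j)} and e_{j->field(i)} element-wise:
#
#     interaction_ij = e_i_for_field_j * e_j_for_field_i   # Hadamard product
#
# and with reduce_sum=True this collapses to the FFM inner product
# <e_{i->field(j)}, e_{j->field(i)}>.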
sequence_embed_dict = varlen_embedding_lookup(embedding_dict, features, sparse_varlen_feature_columns)
sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, features, sparse_varlen_feature_columns,
                                              to_list=True)
dnn_input_emb_list += sequence_embed_list
keys_emb = concat_func(keys_emb_list, mask=True)
deep_input_emb = concat_func(dnn_input_emb_list)
query_emb = concat_func(query_emb_list, mask=True)
hist = AttentionSequencePoolingLayer(att_hidden_size, att_activation,
weight_normalization=att_weight_normalization, supports_masking=True)([
query_emb, keys_emb])
deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
deep_input_emb = Flatten()(deep_input_emb)
dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed)(dnn_input)
final_logit = Dense(1, use_bias=False)(output)
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
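# Masking note for the DIN snippet above (a hedged reading of the code): the
# query and behavior keys are concatenated with mask=True so the padding mask
# survives concat_func, and AttentionSequencePoolingLayer(supports_masking=True)
# consumes that mask directly instead of an explicit sequence-length input;
# NoMask() then strips the mask from deep_input_emb so the plain
# Concatenate/Flatten stack does not propagate it into the DNN.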