# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = Dense(
1, use_bias=False, activation=None)(dnn_out)
final_logit = add_func([dnn_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
def get_learner_score(features,feature_columns, region_number, l2_reg, init_std, seed,prefix='learner_',seq_mask_zero=True,task='binary'):
    """Build one bias-free prediction score per region and concatenate them.

    For each of the ``region_number`` regions a separate linear logit is
    computed over ``feature_columns``; region ``i`` gets its own weight
    scope via ``prefix + str(i + 1)`` and a shifted seed (``seed + i``).
    Each logit is passed through a bias-free ``PredictionLayer(task)``,
    and the resulting region scores are concatenated with ``concat_func``.

    NOTE(review): ``seq_mask_zero`` is accepted but never used in this
    body — confirm whether it should be forwarded to a callee.
    """
    region_scores = []
    for idx in range(region_number):
        # Per-region linear model: distinct prefix + seed keeps the
        # regions' parameters independent of one another.
        logit = get_linear_logit(features, feature_columns,
                                 init_std=init_std, seed=seed + idx,
                                 prefix=prefix + str(idx + 1),
                                 l2_reg=l2_reg)
        region_scores.append(PredictionLayer(task=task, use_bias=False)(logit))
    return concat_func(region_scores)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
fm_input = concat_func(sparse_embedding_list, axis=1)
bi_out = BiInteractionPooling()(fm_input)
if bi_dropout:
bi_out = tf.keras.layers.Dropout(bi_dropout)(bi_out, training=None)
dnn_input = combined_dnn_input([bi_out], dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
final_logit = add_func([linear_logit, dnn_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, init_std, seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, False, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
final_logit = add_func([dnn_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list,
outputs=output)
return model
group_embedding_dict, _ = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, init_std,
seed, support_dense=False, support_group=True)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
if use_attention:
fm_logit = add_func([AFMLayer(attention_factor, l2_reg_att, afm_dropout,
seed)(list(v)) for k, v in group_embedding_dict.items() if k in fm_group])
else:
fm_logit = add_func([FM()(concat_func(v, axis=1))
for k, v in group_embedding_dict.items() if k in fm_group])
final_logit = add_func([linear_logit, fm_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
elif use_inner:
deep_input = tf.keras.layers.Concatenate()(
[linear_signal, inner_product])
elif use_outter:
deep_input = tf.keras.layers.Concatenate()(
[linear_signal, outter_product])
else:
deep_input = linear_signal
dnn_input = combined_dnn_input([deep_input], dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_out)
output = PredictionLayer(task)(dnn_logit)
model = tf.keras.models.Model(inputs=inputs_list,
outputs=output)
return model
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(dnn_input)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(dnn_output)
final_logit = add_func([linear_logit, dnn_logit])
if len(cin_layer_size) > 0:
exFM_out = CIN(cin_layer_size, cin_activation,
cin_split_half, l2_reg_cin, seed)(fm_input)
exFM_logit = tf.keras.layers.Dense(1, activation=None, )(exFM_out)
final_logit = add_func([final_logit, exFM_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
neg_concat_behavior = None
hist, aux_loss_1 = interest_evolution(keys_emb, query_emb, user_behavior_length, gru_type=gru_type,
use_neg=use_negsampling, neg_concat_behavior=neg_concat_behavior,
att_hidden_size=att_hidden_units,
att_activation=att_activation,
att_weight_normalization=att_weight_normalization, )
deep_input_emb = Concatenate()([deep_input_emb, hist])
deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb)
dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, use_bn, seed)(dnn_input)
final_logit = Dense(1, use_bias=False)(output)
output = PredictionLayer(task)(final_logit)
#model_input_list = get_inputs_list(
# [sparse_input, dense_input, user_behavior_input])
model_input_list = inputs_list
#if use_negsampling:
# model_input_list += list(neg_user_behavior_input.values())
model_input_list += [user_behavior_length]
model = tf.keras.models.Model(inputs=model_input_list, outputs=output)
if use_negsampling:
model.add_loss(alpha * aux_loss_1)
try:
tf.keras.backend.get_session().run(tf.global_variables_initializer())
width = conv_kernel_width[i - 1]
k = max(1, int((1 - pow(i / l, l - i)) * n)) if i < l else 3
conv_result = tf.keras.layers.Conv2D(filters=filters, kernel_size=(width, 1), strides=(1, 1), padding='same',
activation='tanh', use_bias=True, )(pooling_result)
pooling_result = KMaxPooling(
k=min(k, int(conv_result.shape[1])), axis=1)(conv_result)
flatten_result = tf.keras.layers.Flatten()(pooling_result)
dnn_out = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn,
dropout_rate=dnn_dropout)(flatten_result)
dnn_logit = tf.keras.layers.Dense(1, use_bias=False)(dnn_out)
final_logit = add_func([dnn_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
lstm_outputs = BiLSTM(len(sess_feature_list) * embedding_size,
layers=2, res_layers=0, dropout_rate=0.2, )(sess_fea)
lstm_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True)(
[query_emb, lstm_outputs, user_sess_length])
deep_input_emb = Concatenate()(
[deep_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer)])
if len(dense_input) > 0:
deep_input_emb = Concatenate()(
[deep_input_emb] + list(dense_input.values()))
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed)(deep_input_emb)
output = Dense(1, use_bias=False, activation=None)(output)
output = PredictionLayer(task)(output)
sess_input_list = []
# sess_input_length_list = []
for i in range(sess_max_count):
sess_name = "sess_" + str(i)
sess_input_list.extend(get_inputs_list(
[user_behavior_input_dict[sess_name]]))
# sess_input_length_list.append(user_behavior_length_dict[sess_name])
model_input_list = get_inputs_list([sparse_input, dense_input]) + sess_input_list + [
user_sess_length]
model = Model(inputs=model_input_list, outputs=output)
return model