Skip to content

Commit f8fde64

Browse files
committed
upgrade zero-inflated lognormal loss, support export structure path, upgrade pre-commit hooks
1 parent e5521f6 commit f8fde64

File tree

7 files changed

+25
-21
lines changed

7 files changed

+25
-21
lines changed

.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,8 @@ repos:
1010
rev: 5.12.0
1111
hooks:
1212
- id: isort
13-
- repo: https://github.com/google/yapf
14-
rev: v0.43.0
13+
- repo: https://github.com/pre-commit/mirrors-yapf
14+
rev: v0.32.0
1515
hooks:
1616
- id: yapf
1717
- repo: https://github.com/pre-commit/pre-commit-hooks

easy_rec/python/compat/embedding_parallel_saver.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -171,8 +171,8 @@ def _load_embed(
171171
embed_ids_o = part_id_o + embed_ids_o * len(embed_files)
172172
sel_ids = np.where(
173173
np.logical_and(
174-
(embed_ids_o % part_num) == part_id, embed_ids_o
175-
< embed_part_size * part_num
174+
(embed_ids_o % part_num) == part_id,
175+
embed_ids_o < embed_part_size * part_num
176176
)
177177
)[0]
178178
part_update_cnt += len(sel_ids)

easy_rec/python/compat/optimizers.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -497,20 +497,21 @@ def _get_grad_norm(grads_and_vars, embedding_parallel=False):
497497
sparse_norms.append(gen_nn_ops.l2_loss(grad.values))
498498
else:
499499
dense_norms.append(gen_nn_ops.l2_loss(grad))
500-
reduced_norms = hvd.grouped_allreduce(
501-
part_norms, op=hvd.Sum, compression=hvd.compression.NoneCompressor
502-
)
503-
sparse_norms = sparse_norms + reduced_norms
504-
all_norms = reduced_norms + dense_norms
500+
if hvd is not None and part_norms:
501+
reduced_norms = hvd.grouped_allreduce(
502+
part_norms, op=hvd.Sum, compression=hvd.compression.NoneCompressor
503+
)
504+
sparse_norms = sparse_norms + reduced_norms
505+
all_norms = sparse_norms + dense_norms
505506
sparse_norm = math_ops.sqrt(
506507
math_ops.reduce_sum(array_ops.stack(sparse_norms) * 2.0)
507-
)
508+
) if sparse_norms else tf.constant(0.0)
508509
dense_norm = math_ops.sqrt(
509510
math_ops.reduce_sum(array_ops.stack(dense_norms) * 2.0)
510-
)
511+
) if dense_norms else tf.constant(0.0)
511512
grad_norm = math_ops.sqrt(
512513
math_ops.reduce_sum(array_ops.stack(all_norms)) * 2.0
513-
)
514+
) if all_norms else tf.constant(0.0)
514515
return sparse_norm, dense_norm, grad_norm
515516

516517

easy_rec/python/main.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -209,13 +209,13 @@ def _metric_cmp_fn(best_eval_result, current_eval_result):
209209
)
210210
if export_config.metric_bigger:
211211
return (
212-
best_eval_result[export_config.best_exporter_metric]
213-
< current_eval_result[export_config.best_exporter_metric]
212+
best_eval_result[export_config.best_exporter_metric] <
213+
current_eval_result[export_config.best_exporter_metric]
214214
)
215215
else:
216216
return (
217-
best_eval_result[export_config.best_exporter_metric]
218-
> current_eval_result[export_config.best_exporter_metric]
217+
best_eval_result[export_config.best_exporter_metric] >
218+
current_eval_result[export_config.best_exporter_metric]
219219
)
220220

221221
exporters = [

easy_rec/python/model/deepfm.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
# -*- encoding:utf-8 -*-
22
# Copyright (c) Alibaba, Inc. and its affiliates.
3+
import logging
34
import tensorflow as tf
45

56
from easy_rec.python.layers import dnn, fm
@@ -47,6 +48,10 @@ def build_input_layer(self, model_config, feature_configs):
4748
if not has_final:
4849
assert model_config.deepfm.wide_output_dim == model_config.num_class
4950
self._wide_output_dim = model_config.deepfm.wide_output_dim
51+
if self._wide_output_dim != 1:
52+
logging.warning(
53+
'wide_output_dim not equal to 1, it is not a standard model'
54+
)
5055
super(DeepFM, self).build_input_layer(model_config, feature_configs)
5156

5257
def build_predict_graph(self):

easy_rec/python/model/easy_rec_estimator.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -322,8 +322,7 @@ def _train_model_fn(self, features, labels, run_config):
322322
'embedding_learning_rate_multiplier'
323323
):
324324
gradient_multipliers = {
325-
var:
326-
self.train_config.optimizer_config[0].
325+
var: self.train_config.optimizer_config[0].
327326
embedding_learning_rate_multiplier
328327
for var in tf.trainable_variables() if 'embedding_weights:' in var.name
329328
or '/embedding_weights/part_' in var.name

easy_rec/python/utils/shape_utils.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -175,9 +175,8 @@ def pad_or_clip_nd(tensor, output_shape):
175175
"""
176176
tensor_shape = tf.shape(tensor)
177177
clip_size = [
178-
tf.where(tensor_shape[i] -
179-
shape > 0, shape, -1) if shape is not None else -1
180-
for i, shape in enumerate(output_shape)
178+
tf.where(tensor_shape[i] - shape > 0, shape, -1)
179+
if shape is not None else -1 for i, shape in enumerate(output_shape)
181180
]
182181
clipped_tensor = tf.slice(
183182
tensor, begin=tf.zeros(len(clip_size), dtype=tf.int32), size=clip_size

0 commit comments

Comments (0)