Skip to content

Commit e6907a1

Browse files
jam14jt authored and tensorflower-gardener committed
Remove old training_ops target and redirect references to the new target.
PiperOrigin-RevId: 573417544
1 parent 53c4dc6 commit e6907a1

File tree

2 files changed

+8
-10
lines changed

2 files changed

+8
-10
lines changed

tensorflow_probability/python/optimizer/sgld.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
from tensorflow_probability.python.internal import distribution_util
2121
from tensorflow_probability.python.internal import dtype_util
2222
from tensorflow_probability.python.math.diag_jacobian import diag_jacobian
23-
from tensorflow.python.training import training_ops
2423

2524

2625
__all__ = [
@@ -235,10 +234,10 @@ def _prepare(self, var_list):
235234
def _resource_apply_dense(self, grad, var):
236235
rms = self.get_slot(var, 'rms')
237236
new_grad = self._apply_noisy_update(rms, grad, var)
238-
return training_ops.resource_apply_gradient_descent(
239-
var.handle,
240-
tf.cast(self._learning_rate_tensor, var.dtype.base_dtype),
241-
new_grad,
237+
return tf.raw_ops.ResourceApplyGradientDescent(
238+
var=var.handle,
239+
alpha=tf.cast(self._learning_rate_tensor, var.dtype.base_dtype),
240+
delta=new_grad,
242241
use_locking=self._use_locking)
243242

244243
def _resource_apply_sparse(self, grad, var, indices):

tensorflow_probability/python/optimizer/variational_sgd.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919
from tensorflow_probability.python.internal import assert_util
2020
from tensorflow_probability.python.internal import distribution_util
2121
from tensorflow_probability.python.internal import dtype_util
22-
from tensorflow.python.training import training_ops
2322

2423

2524
__all__ = [
@@ -236,10 +235,10 @@ def _resource_apply_dense(self, grad, var):
236235
tf.cast(max_learning_rate, var.dtype.base_dtype))
237236

238237
newgrad = grad * learn_rates
239-
return training_ops.resource_apply_gradient_descent(
240-
var.handle,
241-
tf.cast(1., var.dtype),
242-
newgrad,
238+
return tf.raw_ops.ResourceApplyGradientDescent(
239+
var=var.handle,
240+
alpha=tf.cast(1., var.dtype),
241+
delta=newgrad,
243242
use_locking=self._use_locking)
244243

245244
def _resource_apply_sparse(self, grad, var, indices):

0 commit comments

Comments (0)