
Commit ddae7c4

Merge pull request #1245 from NeilGirdhar:master
PiperOrigin-RevId: 380851764
2 parents 8415c37 + 8048ef4 commit ddae7c4


56 files changed (+120, -120 lines)
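
Every hunk in this commit follows the same pattern: the deprecated NumPy aliases np.bool and np.complex, which merely pointed at the Python builtins bool and complex and have been deprecated since NumPy 1.20, are replaced by the explicit scalar types np.bool_ and np.complex128. A minimal sketch of the equivalence, assuming only that NumPy is installed (the shapes and values below are illustrative, not taken from the commit):

import numpy as np

# np.bool was an alias for the builtin bool; np.bool_ is NumPy's boolean
# scalar type. The alias was deprecated in NumPy 1.20 and removed in 1.24.
mask = np.zeros([2, 3], dtype=np.bool_)   # all-False boolean mask
assert mask.dtype == np.dtype(bool)

# np.complex was an alias for the builtin complex; np.complex128 is the
# concrete 128-bit complex dtype such arrays resolved to anyway.
z = np.array([1., 2, 3], dtype=np.complex128)
assert z.dtype == np.complex128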

spinoffs/inference_gym/inference_gym/internal/datasets/synthetic_item_response_theory.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@
 question_ids = np.arange(100)
 
 student_ids, question_ids = np.meshgrid(student_ids, question_ids)
-mask = rng.binomial(1, 0.75, size=student_ids.shape).astype(np.bool)
+mask = rng.binomial(1, 0.75, size=student_ids.shape).astype(np.bool_)
 
 student_ids = student_ids[mask]
 question_ids = question_ids[mask]

spinoffs/inference_gym/inference_gym/targets/item_response_theory.py

Lines changed: 1 addition & 1 deletion
@@ -252,7 +252,7 @@ def _sparse_to_dense(self, student_ids, question_ids, correct):
     # tensor-valued datasets. Blocked by JAX/Numpy not implementing scatter_nd.
     dense_y = np.zeros([self._num_students, self._num_questions], np.float32)
     dense_y[student_ids, question_ids] = correct
-    y_mask = np.zeros(dense_y.shape, np.bool)
+    y_mask = np.zeros(dense_y.shape, np.bool_)
     y_mask[student_ids, question_ids] = True
     return dense_y, y_mask
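
For context, the hunk above sits in a helper that scatters sparse (student, question, correct) observations into a dense response matrix plus an observation mask. A rough NumPy-only sketch of that scatter, with made-up sizes and IDs (num_students and num_questions stand in for the class attributes used in the real method):

import numpy as np

num_students, num_questions = 4, 3          # illustrative sizes only
student_ids = np.array([0, 1, 3])
question_ids = np.array([2, 0, 1])
correct = np.array([1., 0., 1.], np.float32)

# Scatter responses into a dense (student, question) matrix; y_mask marks
# which entries were actually observed.
dense_y = np.zeros([num_students, num_questions], np.float32)
dense_y[student_ids, question_ids] = correct
y_mask = np.zeros(dense_y.shape, np.bool_)
y_mask[student_ids, question_ids] = True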

tensorflow_probability/python/bijectors/affine_test.py

Lines changed: 3 additions & 3 deletions
@@ -611,8 +611,8 @@ def dynamic_run(fun, x_value, **kwargs):
         run(bijector.forward_log_det_jacobian, x, event_ndims=1))
 
   def _testScaledIdentityComplexAdjoint(self, is_dynamic):
-    shift_ = np.array(-0.5, dtype=np.complex)
-    scale_ = np.array(4 + 2j, dtype=np.complex)
+    shift_ = np.array(-0.5, dtype=np.complex128)
+    scale_ = np.array(4 + 2j, dtype=np.complex128)
     shift = tf1.placeholder_with_default(
         shift_, shape=None if is_dynamic else [])
     scale = tf1.placeholder_with_default(
@@ -622,7 +622,7 @@ def _testScaledIdentityComplexAdjoint(self, is_dynamic):
         scale_identity_multiplier=scale,
         adjoint=True,
         validate_args=True)
-    z = np.array([1., 2, 3], dtype=np.complex)
+    z = np.array([1., 2, 3], dtype=np.complex128)
     y = bijector.forward(z)
     x = bijector.inverse(z)
     inv_fwd_z = bijector.inverse(tf.identity(y))
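
The surrounding test runs the same assertions with both statically known and dynamic shapes by wrapping the NumPy inputs in tf1.placeholder_with_default, as the context lines above show; only the dtype spelling changes in this commit. A small sketch of that wrapping, assuming TensorFlow is installed (the is_dynamic flag here is a stand-in for the test's parameter):

import numpy as np
import tensorflow.compat.v1 as tf1

is_dynamic = True
shift_ = np.array(-0.5, dtype=np.complex128)
# With shape=None the static shape is unknown, so downstream code must take
# the dynamic-shape path; with shape=[] it is a statically known scalar.
shift = tf1.placeholder_with_default(
    shift_, shape=None if is_dynamic else [])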

tensorflow_probability/python/bijectors/kumaraswamy_cdf_test.py

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ def testBijectorConcentration1LogDetJacobianFiniteAtZero(self):
         concentration1=1., concentration0=concentration0)
     ildj = self.evaluate(
         bijector.inverse_log_det_jacobian(0., event_ndims=0))
-    self.assertAllEqual(np.ones_like(ildj, dtype=np.bool), np.isfinite(ildj))
+    self.assertAllEqual(np.ones_like(ildj, dtype=np.bool_), np.isfinite(ildj))
 
   def testScalarCongruency(self):
     bijector_test_util.assert_scalar_congruency(

tensorflow_probability/python/bijectors/weibull_cdf_test.py

Lines changed: 1 addition & 1 deletion
@@ -59,7 +59,7 @@ def testBijectorConcentration1LogDetJacobianFiniteAtZero(self):
     scale = np.logspace(0.1, 10., num=20).astype(np.float32)
     bijector = tfb.WeibullCDF(scale, concentration=1.)
     fldj = self.evaluate(bijector.forward_log_det_jacobian(0., event_ndims=0))
-    self.assertAllEqual(np.ones_like(fldj, dtype=np.bool), np.isfinite(fldj))
+    self.assertAllEqual(np.ones_like(fldj, dtype=np.bool_), np.isfinite(fldj))
 
   def testScalarCongruency(self):
     bijector_test_util.assert_scalar_congruency(

tensorflow_probability/python/distributions/beta_test.py

Lines changed: 5 additions & 5 deletions
@@ -153,7 +153,7 @@ def testPdfXStretchedInBroadcastWhenLowerRank(self):
   def testLogPdfOnBoundaryIsFiniteWhenAlphaIsOne(self):
     b = [[0.01, 0.1, 1., 2], [5., 10., 2., 3]]
     pdf = self.evaluate(tfd.Beta(1., b, validate_args=True).prob(0.))
-    self.assertAllEqual(np.ones_like(pdf, dtype=np.bool), np.isfinite(pdf))
+    self.assertAllEqual(np.ones_like(pdf, dtype=np.bool_), np.isfinite(pdf))
 
   def testBetaMean(self):
     a = [1., 2, 3]
@@ -298,8 +298,8 @@ def testBetaCdf(self, dt, rtol):
     b = 10. * np.random.random(shape).astype(dt)
     x = np.random.random(shape).astype(dt)
     actual = self.evaluate(tfd.Beta(a, b, validate_args=True).cdf(x))
-    self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
-    self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
+    self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
+    self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
     self.assertAllClose(sp_stats.beta.cdf(x, a, b), actual, rtol=rtol, atol=0)
 
   def testBetaCdfBeyondSupport(self):
@@ -314,8 +314,8 @@ def testBetaLogCdf(self, dt, rtol):
     x = np.random.random(shape).astype(dt)
     actual = self.evaluate(
         tf.exp(tfd.Beta(a, b, validate_args=True).log_cdf(x)))
-    self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
-    self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
+    self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
+    self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
     self.assertAllClose(sp_stats.beta.cdf(x, a, b), actual, rtol=rtol, atol=0)
 
   def testBetaBetaKL(self):

tensorflow_probability/python/distributions/cauchy_test.py

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ def setUp(self):
 
   def assertAllFinite(self, array):
     is_finite = np.isfinite(array)
-    all_true = np.ones_like(is_finite, dtype=np.bool)
+    all_true = np.ones_like(is_finite, dtype=np.bool_)
     self.assertAllEqual(all_true, is_finite)
 
   def _testParamShapes(self, sample_shape, expected):

tensorflow_probability/python/distributions/dirichlet_test.py

Lines changed: 2 additions & 2 deletions
@@ -76,13 +76,13 @@ def testLogPdfOnBoundaryIsFiniteWhenAlphaIsOne(self):
     dist = tfd.Dirichlet(concentration, validate_args=True)
     log_prob = self.evaluate(dist.log_prob(x))
     self.assertAllEqual(
-        np.ones_like(log_prob, dtype=np.bool), np.isfinite(log_prob))
+        np.ones_like(log_prob, dtype=np.bool_), np.isfinite(log_prob))
 
     # Test when concentration[k] = 1., and x is zero at various dimensions.
     dist = tfd.Dirichlet(10 * [1.])
     log_prob = self.evaluate(dist.log_prob(x))
     self.assertAllEqual(
-        np.ones_like(log_prob, dtype=np.bool), np.isfinite(log_prob))
+        np.ones_like(log_prob, dtype=np.bool_), np.isfinite(log_prob))
 
   def testPdfOnBoundaryWhenAlphaGreaterThanOne(self):
     dist = tfd.Dirichlet(9 * [2.])

tensorflow_probability/python/distributions/dpp_test.py

Lines changed: 1 addition & 1 deletion
@@ -257,7 +257,7 @@ def testDppLogPDF(self, n_points):
       binary = bin(i)[2:]
       subset = [0] * n_points
       subset[-len(binary):] = [int(c) for c in binary]
-      mask = np.array(subset, np.bool)
+      mask = np.array(subset, np.bool_)
      submatrix = true_kernel[mask][:, mask]
       expected = (tf.linalg.logdet(submatrix) -
                   tf.linalg.logdet(true_kernel +
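
The loop this hunk sits in enumerates every subset of n_points items by reading the bits of the loop index i; each subset becomes a boolean mask that slices the matching rows and columns out of the kernel matrix. A minimal sketch of that enumeration, with an arbitrary n_points and a toy kernel rather than the values used in the test:

import numpy as np

n_points = 3
kernel = np.eye(n_points) + 0.1              # toy positive-definite matrix
for i in range(2 ** n_points):
    binary = bin(i)[2:]                      # e.g. i = 5 -> '101'
    subset = [0] * n_points
    subset[-len(binary):] = [int(c) for c in binary]
    mask = np.array(subset, np.bool_)
    submatrix = kernel[mask][:, mask]        # rows/cols of the chosen subset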

tensorflow_probability/python/distributions/gamma_gamma_test.py

Lines changed: 1 addition & 1 deletion
@@ -108,7 +108,7 @@ def testGammaGammaLogPDFAtZero(self):
         validate_args=True)
     log_pdf = self.evaluate(gg.log_prob(0.))
     self.assertAllEqual(
-        np.ones_like(log_pdf, dtype=np.bool), np.isfinite(log_pdf))
+        np.ones_like(log_pdf, dtype=np.bool_), np.isfinite(log_pdf))
 
     gg = tfd.GammaGamma(
         concentration=2.,
