Skip to content

Commit f0f1e3a

Browse files
Johannes Ballé and copybara-github
authored and committed
Fixes deprecated int type warnings.
PiperOrigin-RevId: 424216143 Change-Id: Ic4b23bd75c5322407fda6b4a54832dbaff3139c2
1 parent 9011d92 commit f0f1e3a

10 files changed

+239
-256
lines changed

tensorflow_compression/cc/kernels/pmf_to_cdf_kernels.cc

Lines changed: 17 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -16,6 +16,7 @@ limitations under the License.
1616
#define EIGEN_USE_THREADS
1717

1818
#include <algorithm>
19+
#include <cstdint>
1920
#include <functional>
2021
#include <iterator>
2122
#include <numeric>
@@ -36,18 +37,13 @@ namespace tensorflow_compression {
3637
namespace {
3738
namespace thread = tensorflow::thread;
3839
using tensorflow::DEVICE_CPU;
39-
using tensorflow::int32;
40-
using tensorflow::int64;
4140
using tensorflow::OpKernel;
4241
using tensorflow::OpKernelConstruction;
4342
using tensorflow::OpKernelContext;
4443
using tensorflow::string;
4544
using tensorflow::Tensor;
4645
using tensorflow::TensorShape;
4746
using tensorflow::TensorShapeUtils;
48-
using tensorflow::uint32;
49-
using tensorflow::uint64;
50-
using tensorflow::uint8;
5147
using tensorflow::errors::InvalidArgument;
5248

5349
class PmfToCdfOp : public OpKernel {
@@ -74,12 +70,12 @@ class PmfToCdfOp : public OpKernel {
7470
OP_REQUIRES_OK(context, context->allocate_output(0, shape, &cdf_tensor));
7571

7672
auto pmf = pmf_tensor.flat_inner_dims<float, 2>();
77-
auto cdf = cdf_tensor->flat_inner_dims<int32, 2>();
73+
auto cdf = cdf_tensor->flat_inner_dims<int32_t, 2>();
7874
CHECK_EQ(pmf.dimension(0), cdf.dimension(0));
7975
CHECK_EQ(pmf.dimension(1) + 1, cdf.dimension(1));
8076

81-
for (int64 i = 0; i < pmf.dimension(0); ++i) {
82-
for (int64 j = 0; j < pmf.dimension(1); ++j) {
77+
for (int64_t i = 0; i < pmf.dimension(0); ++i) {
78+
for (int64_t j = 0; j < pmf.dimension(1); ++j) {
8379
auto value = pmf(i, j);
8480
OP_REQUIRES(
8581
context, std::isfinite(value) && value >= 0,
@@ -90,14 +86,14 @@ class PmfToCdfOp : public OpKernel {
9086
}
9187

9288
const double n = pmf.dimension(1);
93-
const int64 cost_per_unit = static_cast<int64>(50.0 * n * std::log2(n));
89+
const int64_t cost_per_unit = static_cast<int64_t>(50.0 * n * std::log2(n));
9490
thread::ThreadPool* thread_pool =
9591
context->device()->tensorflow_cpu_worker_threads()->workers;
9692
thread_pool->ParallelFor(
9793
pmf.dimension(0), cost_per_unit,
98-
[this, pmf, &cdf](int64 start, int64 limit) {
94+
[this, pmf, &cdf](int64_t start, int64_t limit) {
9995
const absl::Span<const float>::size_type pmf_size = pmf.dimension(1);
100-
for (int64 i = start; i < limit; ++i) {
96+
for (int64_t i = start; i < limit; ++i) {
10197
cdf(i, 0) = 0;
10298
PerShard({&pmf(i, 0), pmf_size}, {&cdf(i, 1), pmf_size});
10399
}
@@ -106,7 +102,7 @@ class PmfToCdfOp : public OpKernel {
106102

107103
private:
108104
struct PenaltyItem {
109-
PenaltyItem(int32* p, double mass) : pointer(p), mass(mass) {
105+
PenaltyItem(int32_t* p, double mass) : pointer(p), mass(mass) {
110106
penalty = ComputeNextPenalty();
111107
}
112108

@@ -127,13 +123,13 @@ class PmfToCdfOp : public OpKernel {
127123
return mass * (std::log2(*pointer) - std::log2(*pointer - 1));
128124
}
129125

130-
int32* pointer;
126+
int32_t* pointer;
131127
double mass;
132128
double penalty;
133129
};
134130

135131
struct GainItem {
136-
GainItem(int32* p, double mass) : pointer(p), mass(mass) {
132+
GainItem(int32_t* p, double mass) : pointer(p), mass(mass) {
137133
gain = ComputeNextGain();
138134
}
139135

@@ -155,28 +151,28 @@ class PmfToCdfOp : public OpKernel {
155151
return mass * (std::log2(*pointer + 1) - std::log2(*pointer));
156152
}
157153

158-
int32* pointer;
154+
int32_t* pointer;
159155
double mass;
160156
double gain;
161157
};
162158

163-
void PerShard(absl::Span<const float> pmf, absl::Span<int32> cdf) const {
159+
void PerShard(absl::Span<const float> pmf, absl::Span<int32_t> cdf) const {
164160
CHECK_EQ(pmf.size(), cdf.size());
165161

166-
const int32 normalizer = 1 << precision_;
162+
const int32_t normalizer = 1 << precision_;
167163
std::transform(pmf.begin(), pmf.end(), cdf.begin(),
168164
[normalizer](float mass) {
169-
int32 value = std::rint(mass * normalizer);
165+
int32_t value = std::rint(mass * normalizer);
170166
// NOTE: Consider checking if mass > 0.
171167
value = std::max(value, 1);
172168
return value;
173169
});
174170

175-
int32 sum = std::accumulate(cdf.begin(), cdf.end(), 0);
171+
int32_t sum = std::accumulate(cdf.begin(), cdf.end(), 0);
176172
if (sum > normalizer) {
177173
std::vector<PenaltyItem> queue;
178174
queue.reserve(cdf.size());
179-
for (absl::Span<int32>::size_type i = 0; i < cdf.size(); ++i) {
175+
for (absl::Span<int32_t>::size_type i = 0; i < cdf.size(); ++i) {
180176
queue.emplace_back(&cdf[i], pmf[i]);
181177
}
182178

@@ -193,7 +189,7 @@ class PmfToCdfOp : public OpKernel {
193189
} else if (sum < normalizer) {
194190
std::vector<GainItem> queue;
195191
queue.reserve(cdf.size());
196-
for (absl::Span<int32>::size_type i = 0; i < cdf.size(); ++i) {
192+
for (absl::Span<int32_t>::size_type i = 0; i < cdf.size(); ++i) {
197193
queue.emplace_back(&cdf[i], pmf[i]);
198194
}
199195

tensorflow_compression/cc/kernels/pmf_to_cdf_kernels_test.cc

Lines changed: 7 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -14,6 +14,7 @@ limitations under the License.
1414
==============================================================================*/
1515

1616
#include <algorithm>
17+
#include <cstdint>
1718
#include <limits>
1819
#include <random>
1920

@@ -39,7 +40,6 @@ using tensorflow::NodeDefBuilder;
3940
using tensorflow::OpsTestBase;
4041
using tensorflow::ShapeInferenceTestOp;
4142
using tensorflow::Tensor;
42-
using tensorflow::TensorShape;
4343
using tensorflow::TTypes;
4444

4545
class PmfToQuantizedCdfOpTest : public OpsTestBase {
@@ -77,17 +77,18 @@ class PmfToQuantizedCdfOpTest : public OpsTestBase {
7777
}
7878

7979
auto pmf = pmf_tensor.flat_inner_dims<float, 2>();
80-
auto cdf = cdf_tensor.flat_inner_dims<int32, 2>();
80+
auto cdf = cdf_tensor.flat_inner_dims<int32_t, 2>();
8181
EXPECT_EQ(pmf.dimension(1) + 1, cdf.dimension(1));
8282

8383
const int normalizer = 1 << precision;
8484
for (int i = 0; i < pmf.dimension(0); ++i) {
8585
EXPECT_EQ(0, cdf(i, 0));
8686

87-
TTypes<int32>::UnalignedConstVec cdf_slice(&cdf(i, 0), cdf.dimension(1));
87+
TTypes<int32_t>::UnalignedConstVec cdf_slice(&cdf(i, 0),
88+
cdf.dimension(1));
8889

8990
for (int j = 1; j < cdf_slice.size(); ++j) {
90-
const int32 diff = cdf_slice(j) - cdf_slice(j - 1);
91+
const int32_t diff = cdf_slice(j) - cdf_slice(j - 1);
9192
EXPECT_GT(diff, 0);
9293
}
9394

@@ -104,7 +105,7 @@ TEST_F(PmfToQuantizedCdfOpTest, UnderSum) {
104105
std::random_device rd;
105106
random::PhiloxRandom gen(rd(), rd());
106107
random::SimplePhilox rand(&gen);
107-
for (int64 i = 0; i < matrix.dimension(0); ++i) {
108+
for (int64_t i = 0; i < matrix.dimension(0); ++i) {
108109
GenerateData(&rand, {&matrix(i, 0), n});
109110
}
110111

@@ -130,7 +131,7 @@ TEST_F(PmfToQuantizedCdfOpTest, OverSum) {
130131
std::random_device rd;
131132
random::PhiloxRandom gen(rd(), rd());
132133
random::SimplePhilox rand(&gen);
133-
for (int64 i = 0; i < matrix.dimension(0); ++i) {
134+
for (int64_t i = 0; i < matrix.dimension(0); ++i) {
134135
GenerateData(&rand, {&matrix(i, 0), n});
135136
}
136137

0 commit comments

Comments (0)