@@ -16,6 +16,7 @@ limitations under the License.
 #define EIGEN_USE_THREADS
 
 #include <algorithm>
+#include <cstdint>
 #include <functional>
 #include <iterator>
 #include <numeric>
@@ -36,18 +37,13 @@ namespace tensorflow_compression {
 namespace {
 namespace thread = tensorflow::thread;
 using tensorflow::DEVICE_CPU;
-using tensorflow::int32;
-using tensorflow::int64;
 using tensorflow::OpKernel;
 using tensorflow::OpKernelConstruction;
 using tensorflow::OpKernelContext;
 using tensorflow::string;
 using tensorflow::Tensor;
 using tensorflow::TensorShape;
 using tensorflow::TensorShapeUtils;
-using tensorflow::uint32;
-using tensorflow::uint64;
-using tensorflow::uint8;
 using tensorflow::errors::InvalidArgument;
 
 class PmfToCdfOp : public OpKernel {
@@ -74,12 +70,12 @@ class PmfToCdfOp : public OpKernel {
     OP_REQUIRES_OK(context, context->allocate_output(0, shape, &cdf_tensor));
 
     auto pmf = pmf_tensor.flat_inner_dims<float, 2>();
-    auto cdf = cdf_tensor->flat_inner_dims<int32, 2>();
+    auto cdf = cdf_tensor->flat_inner_dims<int32_t, 2>();
     CHECK_EQ(pmf.dimension(0), cdf.dimension(0));
     CHECK_EQ(pmf.dimension(1) + 1, cdf.dimension(1));
 
-    for (int64 i = 0; i < pmf.dimension(0); ++i) {
-      for (int64 j = 0; j < pmf.dimension(1); ++j) {
+    for (int64_t i = 0; i < pmf.dimension(0); ++i) {
+      for (int64_t j = 0; j < pmf.dimension(1); ++j) {
         auto value = pmf(i, j);
         OP_REQUIRES(
             context, std::isfinite(value) && value >= 0,
@@ -90,14 +86,14 @@ class PmfToCdfOp : public OpKernel {
     }
 
     const double n = pmf.dimension(1);
-    const int64 cost_per_unit = static_cast<int64>(50.0 * n * std::log2(n));
+    const int64_t cost_per_unit = static_cast<int64_t>(50.0 * n * std::log2(n));
     thread::ThreadPool* thread_pool =
         context->device()->tensorflow_cpu_worker_threads()->workers;
     thread_pool->ParallelFor(
         pmf.dimension(0), cost_per_unit,
-        [this, pmf, &cdf](int64 start, int64 limit) {
+        [this, pmf, &cdf](int64_t start, int64_t limit) {
           const absl::Span<const float>::size_type pmf_size = pmf.dimension(1);
-          for (int64 i = start; i < limit; ++i) {
+          for (int64_t i = start; i < limit; ++i) {
             cdf(i, 0) = 0;
             PerShard({&pmf(i, 0), pmf_size}, {&cdf(i, 1), pmf_size});
           }
@@ -106,7 +102,7 @@ class PmfToCdfOp : public OpKernel {
 
  private:
   struct PenaltyItem {
-    PenaltyItem(int32* p, double mass) : pointer(p), mass(mass) {
+    PenaltyItem(int32_t* p, double mass) : pointer(p), mass(mass) {
       penalty = ComputeNextPenalty();
     }
 
@@ -127,13 +123,13 @@ class PmfToCdfOp : public OpKernel {
       return mass * (std::log2(*pointer) - std::log2(*pointer - 1));
     }
 
-    int32* pointer;
+    int32_t* pointer;
     double mass;
     double penalty;
   };
 
   struct GainItem {
-    GainItem(int32* p, double mass) : pointer(p), mass(mass) {
+    GainItem(int32_t* p, double mass) : pointer(p), mass(mass) {
       gain = ComputeNextGain();
     }
 
@@ -155,28 +151,28 @@ class PmfToCdfOp : public OpKernel {
       return mass * (std::log2(*pointer + 1) - std::log2(*pointer));
     }
 
-    int32* pointer;
+    int32_t* pointer;
     double mass;
     double gain;
   };
 
-  void PerShard(absl::Span<const float> pmf, absl::Span<int32> cdf) const {
+  void PerShard(absl::Span<const float> pmf, absl::Span<int32_t> cdf) const {
     CHECK_EQ(pmf.size(), cdf.size());
 
-    const int32 normalizer = 1 << precision_;
+    const int32_t normalizer = 1 << precision_;
     std::transform(pmf.begin(), pmf.end(), cdf.begin(),
                    [normalizer](float mass) {
-                     int32 value = std::rint(mass * normalizer);
+                     int32_t value = std::rint(mass * normalizer);
                      // NOTE: Consider checking if mass > 0.
                      value = std::max(value, 1);
                      return value;
                    });
 
-    int32 sum = std::accumulate(cdf.begin(), cdf.end(), 0);
+    int32_t sum = std::accumulate(cdf.begin(), cdf.end(), 0);
     if (sum > normalizer) {
       std::vector<PenaltyItem> queue;
       queue.reserve(cdf.size());
-      for (absl::Span<int32>::size_type i = 0; i < cdf.size(); ++i) {
+      for (absl::Span<int32_t>::size_type i = 0; i < cdf.size(); ++i) {
         queue.emplace_back(&cdf[i], pmf[i]);
       }
 
@@ -193,7 +189,7 @@ class PmfToCdfOp : public OpKernel {
     } else if (sum < normalizer) {
       std::vector<GainItem> queue;
       queue.reserve(cdf.size());
-      for (absl::Span<int32>::size_type i = 0; i < cdf.size(); ++i) {
+      for (absl::Span<int32_t>::size_type i = 0; i < cdf.size(); ++i) {
        queue.emplace_back(&cdf[i], pmf[i]);
      }
 
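For readers skimming the hunks above, here is a minimal standalone sketch of the quantization step that PerShard performs, written against the fixed-width types this change adopts. The free-function form and the name QuantizePmf are illustrative only, not part of the op; the op keeps this logic inside the PerShard member function and follows it with the PenaltyItem/GainItem adjustment shown in the diff.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Sketch: round each probability mass to a grid of 1/2^precision, but never
// to zero, so every symbol keeps a nonzero interval in the quantized CDF.
std::vector<int32_t> QuantizePmf(const std::vector<float>& pmf, int precision) {
  const int32_t normalizer = int32_t{1} << precision;
  std::vector<int32_t> cdf(pmf.size());
  std::transform(pmf.begin(), pmf.end(), cdf.begin(),
                 [normalizer](float mass) {
                   int32_t value =
                       static_cast<int32_t>(std::rint(mass * normalizer));
                   return std::max(value, int32_t{1});
                 });
  // The op then nudges entries up or down (the GainItem/PenaltyItem heaps in
  // the diff) until the values sum exactly to normalizer; omitted here.
  return cdf;
}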