@@ -17,6 +17,7 @@
 from __future__ import division
 from __future__ import print_function
 
+import collections
 import tensorflow as tf
 
 from tensorflow_model_optimization.python.core.internal.tensor_encoding.core import core_encoder
@@ -147,7 +148,6 @@ def from_encoder(cls, encoder, tensorspec):
     if not tensorspec.shape.is_fully_defined():
       raise TypeError('The shape of provided tensorspec must be fully defined.')
 
-    tensorspec = tensorspec
     commuting_structure = encoder.commuting_structure
     state_update_aggregation_modes = tf.nest.flatten(
         encoder.state_update_aggregation_modes)
@@ -187,8 +187,8 @@ def from_encoder(cls, encoder, tensorspec):
     # of the tensor_encoding tool do not even need to be aware of it. This
     # argument is well supported for instance in the book of John Ousterhout,
     # "A Philosophy of Software Design".
-    internal_structure = {}
-    internal_py_values = {}
+    internal_structure = collections.OrderedDict()
+    internal_py_values = collections.OrderedDict()
 
     def _add_to_structure(key, value):
       if key not in internal_structure:
@@ -226,10 +226,10 @@ def get_params_fn(flat_state):
       _, input_shapes_after_sum = (
           core_encoder.split_shapes_by_commuting_structure(
              input_shapes, commuting_structure))
-      decode_after_sum_params = {
-          _PARAMS: decode_after_sum_params,
-          _SHAPES: input_shapes_after_sum
-      }
+      decode_after_sum_params = collections.OrderedDict([
+          (_PARAMS, decode_after_sum_params),
+          (_SHAPES, input_shapes_after_sum),
+      ])
 
       encode_params_py, encode_params_tf = py_utils.split_dict_py_tf(
           encode_params)
@@ -274,18 +274,18 @@ def encode_fn(x, params):
          core_encoder.split_shapes_by_commuting_structure(
              input_shapes, commuting_structure))
 
-      encoded_structure = {
-          _TENSORS: encoded_x,
-          _SHAPES: input_shapes_before_sum
-      }
+      encoded_structure = collections.OrderedDict([
+          (_TENSORS, encoded_x),
+          (_SHAPES, input_shapes_before_sum),
+      ])
       encoded_structure_py, encoded_structure_tf = py_utils.split_dict_py_tf(
           encoded_structure)
 
       _add_to_structure('encoded_structure', encoded_structure_tf)
       _add_to_structure('state_update_tensors', state_update_tensors)
       _add_to_py_values('encoded_structure', encoded_structure_py)
 
-      return (dict(
+      return (collections.OrderedDict(
          py_utils.flatten_with_joined_string_paths(encoded_structure_tf)),
              tuple(tf.nest.flatten(state_update_tensors)))
@@ -316,7 +316,7 @@ def decode_before_sum_fn(encoded_structure, params):
 
       _add_to_structure('part_decoded_structure', part_decoded_structure)
       if isinstance(part_decoded_structure, dict):
-        return dict(
+        return collections.OrderedDict(
            py_utils.flatten_with_joined_string_paths(part_decoded_structure))
       else:
        return part_decoded_structure
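Context on the change itself: these mappings are later flattened with tf.nest.flatten and py_utils.flatten_with_joined_string_paths, so the key iteration order presumably needs to be reproducible across runs and Python versions. collections.OrderedDict guarantees insertion order on every supported interpreter, including Python 2 (which the __future__ imports suggest is still targeted), whereas a plain dict literal only guarantees it from Python 3.7 onward. A minimal standalone sketch of that guarantee follows; the keys and values are made up for illustration and are not taken from the patch.

    import collections

    # Plain dict: key order is only guaranteed to match insertion order on
    # Python >= 3.7; on Python 2 it is arbitrary.
    plain = {'params': {'seed': 42}, 'shapes': [(10,), (10,)]}

    # OrderedDict: key order always matches insertion order.
    ordered = collections.OrderedDict([
        ('params', {'seed': 42}),
        ('shapes', [(10,), (10,)]),
    ])

    assert list(ordered) == ['params', 'shapes']  # holds on every Python version
    print(list(plain))  # same keys, but the order is version-dependent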